repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
daler/gffutils | gffutils/helpers.py | 1 | 18443 | import copy
import sys
import os
import simplejson as json
import time
import tempfile
import six
from gffutils import constants
from gffutils import bins
import gffutils
from gffutils import gffwriter
from gffutils import parser
from gffutils.attributes import dict_class
HERE = os.path.dirname(os.path.abspath(__file__))
def example_filename(fn):
    """
    Return the full path of a data file that ships with gffutils.
    """
    data_dir = os.path.join(HERE, "test", "data")
    return os.path.join(data_dir, fn)
def infer_dialect(attributes):
    """
    Infer the dialect based on the attributes.

    Parameters
    ----------
    attributes : str
        A single attributes string from a GTF or GFF line

    Returns
    -------
    Dictionary representing the inferred dialect
    """
    # The parsed attributes themselves are not needed here -- only the
    # dialect detected while splitting them.
    _, dialect = parser._split_keyvals(attributes)
    return dialect
def _choose_dialect(features):
    """
    Given a list of features (often from peeking into an iterator), choose
    a dialect to use as the "canonical" version.

    If `features` is an empty list, then use the default GFF3 dialect.

    Parameters
    ----------
    features : iterable
        Iterable of Feature objects; each is expected to expose `.dialect`
        (a dict) and `.attributes` (a mapping).

    Returns
    -------
    dict
        A single dialect dictionary with the same keys as
        `constants.dialect`, plus an "order" key listing attribute names in
        first-observed order.
    """
    # NOTE: can use helpers.dialect_compare if you need to make this more
    # complex....
    if len(features) == 0:
        return constants.dialect
    # Structure of `count` will be, e.g.,
    #
    # {
    #   'keyval separator': {'=': 35},
    #   'trailing semicolon': {True: 30, False: 5},
    #   ...(other dialect keys here)...
    # }
    #
    # In this example, all features agreed on keyval separator. For trailing
    # semicolon, there was a higher weight for True, so that will be selected
    # for the final dialect.
    count = {k: {} for k in constants.dialect.keys()}
    for feature in features:
        # Number of attributes is currently being used as the weight for
        # dialect selection. That is, more complex attribute strings are more
        # likely to be informative when determining dialect. This is important
        # for e.g., #128, where there is equal representation of long and short
        # attributes -- but only the longer attributes correctly have ";
        # " field separators.
        weight = len(feature.attributes)
        for k, v in feature.dialect.items():
            # Lists are unhashable; normalize to tuples so they can be used
            # as keys of the per-dialect-key tally dict.
            if isinstance(v, list):
                v = tuple(v)
            val = count[k].get(v, 0)
            # Increment the observed value by the number of attributes (so more
            # complex attribute strings have higher weight in determining
            # dialect)
            count[k][v] = val + weight
    final_dialect = {}
    for k, v in count.items():
        # Tuples of (entry, total weight) in descending sort
        vs = sorted(v.items(), key=lambda x: x[1], reverse=True)
        # So the first tuple's first item is the winning value for this dialect
        # key.
        final_dialect[k] = vs[0][0]
    # For backwards compatibility, to figure out the field order to use for the
    # dialect we append additional fields as they are observed, giving priority
    # to attributes that come first in earlier features. The alternative would
    # be to give preference to the most-common order of attributes.
    final_order = []
    for feature in features:
        for o in feature.attributes.keys():
            if o not in final_order:
                final_order.append(o)
    final_dialect["order"] = final_order
    return final_dialect
def make_query(
    args,
    other=None,
    limit=None,
    strand=None,
    featuretype=None,
    extra=None,
    order_by=None,
    reverse=False,
    completely_within=False,
):
    """
    Multi-purpose, bare-bones ORM function.

    This function composes queries given some commonly-used kwargs that can be
    passed to FeatureDB methods (like .parents(), .children(), .all_features(),
    .features_of_type()). It handles, in one place, things like restricting to
    featuretype, limiting to a genomic range, limiting to one strand, or
    returning results ordered by different criteria.

    Additional filtering/subsetting/sorting behavior should be added here.

    (Note: this ended up having better performance (and flexibility) than
    sqlalchemy)

    This function also provides support for additional JOINs etc (supplied via
    the `other` kwarg) and extra conditional clauses (`extra` kwarg). See the
    `_QUERY` var below for the order in which they are used.

    For example, FeatureDB._relation uses `other` to supply the JOIN
    substatment, and that same method also uses `extra` to supply the
    "relations.level = ?" substatment (see the source for FeatureDB._relation
    for more details).

    `args` contains the arguments that will ultimately be supplied to the
    sqlite3.connection.execute function. It may be further populated below --
    for example, if strand="+", then the query will include a strand clause,
    and the strand will be appended to the args.

    `args` can be pre-filled with args that are passed to `other` and `extra`.

    Parameters
    ----------
    args : list
        Mutable list of SQL placeholder arguments; extended in place.
    other, extra : str
        Additional SQL fragments (JOINs / conditions); any "?" placeholders
        they contain must already be covered by `args`.
    limit : str or (seqid, start, end) tuple
        Genomic region restriction, e.g. "chr2L:1000-5000".
    strand, featuretype, order_by, reverse, completely_within
        See FeatureDB method docs; all optional.

    Returns
    -------
    (query, args) : (str, list)
    """
    _QUERY = "{_SELECT} {OTHER} {EXTRA} {FEATURETYPE} " "{LIMIT} {STRAND} {ORDER_BY}"
    # Construct a dictionary `d` that will be used later as _QUERY.format(**d).
    # Default is just _SELECT, which returns all records in the features table.
    # (Recall that constants._SELECT gets the fields in the order needed to
    # reconstruct a Feature)
    d = dict(
        _SELECT=constants._SELECT,
        OTHER="",
        FEATURETYPE="",
        LIMIT="",
        STRAND="",
        ORDER_BY="",
        EXTRA="",
    )
    if other:
        d["OTHER"] = other
    if extra:
        d["EXTRA"] = extra
    # If `other` and `extra` take args (that is, they have "?" in them), then
    # they should have been provided in `args`.
    required_args = (d["EXTRA"] + d["OTHER"]).count("?")
    if len(args) != required_args:
        # NOTE: the check is for an exact match, so the message covers both
        # too few and too many args (previously it claimed "not enough").
        raise ValueError(
            "Expected %d args for subquery, got %s" % (required_args, args)
        )
    # Below, if a kwarg is specified, then we create sections of the query --
    # appending to args as necessary.
    #
    # IMPORTANT: the order in which things are processed here is the same as
    # the order of the placeholders in _QUERY. That is, we need to build the
    # args in parallel with the query to avoid putting the wrong args in the
    # wrong place.
    if featuretype:
        # Handle single or iterables of featuretypes.
        #
        # e.g., "featuretype = 'exon'"
        #
        # or, "featuretype IN ('exon', 'CDS')"
        if isinstance(featuretype, six.string_types):
            d["FEATURETYPE"] = "features.featuretype = ?"
            args.append(featuretype)
        else:
            d["FEATURETYPE"] = "features.featuretype IN (%s)" % (
                ",".join(["?" for _ in featuretype])
            )
            args.extend(featuretype)
    if limit:
        # Restrict to a genomic region. Makes use of the UCSC binning strategy
        # for performance.
        #
        # `limit` is a string or a tuple of (chrom, start, stop)
        #
        # e.g., "seqid = 'chr2L' AND start > 1000 AND end < 5000"
        if isinstance(limit, six.string_types):
            seqid, startstop = limit.split(":")
            start, end = startstop.split("-")
        else:
            seqid, start, end = limit
        # Identify possible bins
        _bins = bins.bins(int(start), int(end), one=False)
        # Use different overlap conditions
        if completely_within:
            d["LIMIT"] = (
                "features.seqid = ? AND features.start >= ? " "AND features.end <= ?"
            )
            args.extend([seqid, start, end])
        else:
            d["LIMIT"] = (
                "features.seqid = ? AND features.start <= ? " "AND features.end >= ?"
            )
            # Note order (end, start)
            args.extend([seqid, end, start])
        # Add bin clause. See issue #45.
        if len(_bins) < 900:
            d["LIMIT"] += " AND features.bin IN (%s)" % (",".join(map(str, _bins)))
    if strand:
        # e.g., "strand = '+'"
        d["STRAND"] = "features.strand = ?"
        args.append(strand)
    # TODO: implement file_order!
    valid_order_by = constants._gffkeys_extra + ["file_order", "length"]
    _order_by = []
    if order_by:
        # Default is essentially random order.
        #
        # e.g. "ORDER BY seqid, start DESC"
        if isinstance(order_by, six.string_types):
            _order_by.append(order_by)
        else:
            for k in order_by:
                if k not in valid_order_by:
                    raise ValueError(
                        "%s not a valid order-by value in %s" % (k, valid_order_by)
                    )
                # There's no length field, so order by end - start
                if k == "length":
                    k = "(end - start)"
                _order_by.append(k)
        _order_by = ",".join(_order_by)
        if reverse:
            direction = "DESC"
        else:
            direction = "ASC"
        d["ORDER_BY"] = "ORDER BY %s %s" % (_order_by, direction)
    # Ensure only one "WHERE" is included; the rest get "AND ". This is ugly.
    where = False
    if "where" in d["OTHER"].lower():
        where = True
    for i in ["EXTRA", "FEATURETYPE", "LIMIT", "STRAND"]:
        if d[i]:
            if not where:
                d[i] = "WHERE " + d[i]
                where = True
            else:
                d[i] = "AND " + d[i]
    return _QUERY.format(**d), args
def _bin_from_dict(d):
    """
    Given a dictionary yielded by the parser, return the genomic "UCSC" bin
    """
    try:
        return bins.bins(int(d["start"]), int(d["end"]), one=True)
    except ValueError:
        # Start/end not numeric -- e.g., the "." placeholder
        return None
def _jsonify(x):
    """Use most compact form of JSON"""
    # Attribute objects serialize their internal dict; anything else as-is.
    payload = x._d if isinstance(x, dict_class) else x
    return json.dumps(payload, separators=(",", ":"))
def _unjsonify(x, isattributes=False):
    """Convert JSON string to an ordered defaultdict."""
    obj = json.loads(x)
    if isattributes:
        # Wrap in the attributes dict class used throughout gffutils
        return dict_class(obj)
    return obj
def _feature_to_fields(f, jsonify=True):
    """
    Convert feature to tuple, for faster sqlite3 import
    """
    fields = []
    for key in constants._keys:
        value = getattr(f, key)
        # attributes/extra are stored as JSON strings in the database
        if jsonify and key in ("attributes", "extra"):
            value = _jsonify(value)
        fields.append(value)
    return tuple(fields)
def _dict_to_fields(d, jsonify=True):
    """
    Convert dict to tuple, for faster sqlite3 import
    """
    fields = []
    for key in constants._keys:
        value = d[key]
        # attributes/extra are stored as JSON strings in the database
        if jsonify and key in ("attributes", "extra"):
            value = _jsonify(value)
        fields.append(value)
    return tuple(fields)
def asinterval(feature):
    """
    Converts a gffutils.Feature to a pybedtools.Interval
    """
    # Imported lazily so pybedtools remains an optional dependency
    import pybedtools

    fields = str(feature).split("\t")
    return pybedtools.create_interval_from_list(fields)
def merge_attributes(attr1, attr2, numeric_sort=False):
    """
    Merges two attribute dictionaries into a single dictionary.

    Parameters
    ----------
    `attr1`, `attr2` : dict

    numeric_sort : bool
        If True, then attempt to convert all values for a key into floats, sort
        them numerically, and return the original strings in numerical order.
        Default is False for performance.

    Returns
    -------
    dict
    """
    merged = copy.deepcopy(attr1)
    merged.update(copy.deepcopy(attr2))
    # The update() above clobbered attr1's values for shared keys; first
    # normalize everything to lists...
    for key in merged:
        if not isinstance(merged[key], list):
            merged[key] = [merged[key]]
    # ...then re-add attr1's values for keys present in both inputs.
    for key, value in attr1.items():
        if key in attr2:
            if not isinstance(value, list):
                value = [value]
            merged[key].extend(value)
    if not numeric_sort:
        return {k: sorted(set(v)) for k, v in merged.items()}
    result = {}
    for key, values in merged.items():
        unique = set(values)
        try:
            # Sort (float, original-string) pairs numerically, then keep only
            # the original strings, e.g. ['5', '4.2'] -> ['4.2', '5'].
            result[key] = [s for _, s in sorted((float(v), v) for v in unique)]
        except ValueError:
            # Not every value could be converted to a number; fall back to
            # plain lexicographic order.
            result[key] = sorted(unique)
    return result
def dialect_compare(dialect1, dialect2):
    """
    Compares two dialects.

    Returns a dict with keys 'added' (items present only in `dialect2`) and
    'removed' (items present only in `dialect1`).
    """
    items1 = set(dialect1.items())
    items2 = set(dialect2.items())
    return {
        "added": dict(items2 - items1),
        "removed": dict(items1 - items2),
    }
def sanitize_gff_db(db, gid_field="gid"):
    """
    Sanitize given GFF db. Returns a sanitized GFF db.

    Sanitizing means:

    - Ensuring that start < stop for all features
    - Standardizing gene units by adding a 'gid' attribute
      that makes the file grep-able

    Parameters
    ----------
    db : FeatureDB
        Database to sanitize; must support `iter_by_parent_childs()`.
    gid_field : str
        Attribute name under which each record's gene ID is stored.

    Returns
    -------
    FeatureDB
        A new in-memory database built from the sanitized records.

    TODO: Do something with negative coordinates?
    """

    def sanitized_iterator():
        # Iterate through the database by each gene's records
        for gene_recs in db.iter_by_parent_childs():
            # The gene's ID (first record of each group is the gene itself)
            gene_id = gene_recs[0].id
            for rec in gene_recs:
                # Fixup coordinates if necessary
                if rec.start > rec.stop:
                    rec.start, rec.stop = rec.stop, rec.start
                # Add a gene id field to each gene's records
                rec.attributes[gid_field] = [gene_id]
                yield rec

    # Return sanitized GFF database
    sanitized_db = gffutils.create_db(sanitized_iterator(), ":memory:", verbose=False)
    return sanitized_db
def sanitize_gff_file(gff_fname, in_memory=True, in_place=False):
    """
    Sanitize a GFF file.

    Loads `gff_fname` -- either a pre-built gffutils database file or a plain
    GFF file -- sanitizes it via `sanitize_gff_db`, and writes the resulting
    gene records either back into the file (`in_place=True`) or to stdout.

    Parameters
    ----------
    gff_fname : str
        Path to a GFF file or a gffutils ".db" file.
    in_memory : bool
        When building a db from a GFF file, build it in memory (True) or on
        disk via `get_gff_db` (False).
    in_place : bool
        Write output back to `gff_fname` instead of stdout.
    """
    db = None
    if is_gff_db(gff_fname):
        # It's a database filename, so load it
        db = gffutils.FeatureDB(gff_fname)
    else:
        # Need to create a database for file
        if in_memory:
            db = gffutils.create_db(gff_fname, ":memory:", verbose=False)
        else:
            db = get_gff_db(gff_fname)
    if in_place:
        gff_out = gffwriter.GFFWriter(gff_fname, in_place=in_place)
    else:
        gff_out = gffwriter.GFFWriter(sys.stdout)
    sanitized_db = sanitize_gff_db(db)
    # Emit each gene unit's records through the writer
    for gene_rec in sanitized_db.all_features(featuretype="gene"):
        gff_out.write_gene_recs(sanitized_db, gene_rec.id)
    gff_out.close()
def annotate_gff_db(db):
    """
    Annotate a GFF file by cross-referencing it with another GFF
    file, e.g. one containing gene models.

    Currently an unimplemented placeholder; does nothing.
    """
    return None
def is_gff_db(db_fname):
    """
    Return True if the given filename is a GFF database.

    For now, rely on .db extension (and the file existing on disk).
    """
    return os.path.isfile(db_fname) and db_fname.endswith(".db")
def to_unicode(obj, encoding="utf-8"):
    """
    Return `obj` as unicode text.

    Bytestrings are decoded using `encoding`; text strings and any
    non-string objects are returned unchanged.

    NOTE: the previous six-based implementation only decoded on Python 2;
    on Python 3 a `bytes` input was returned undecoded, contradicting the
    function's purpose. Decoding bytes here restores the intended behavior.
    """
    if isinstance(obj, bytes):
        return obj.decode(encoding)
    return obj
def canonical_transcripts(db, fasta_filename):
    """
    Yield (transcript, spliced_sequence) for the canonical transcript of each
    gene in `db`.

    The canonical transcript is the one with the longest total CDS; if no
    transcript of a gene has any CDS, the longest transcript overall is used.

    WARNING: this function is currently not well tested and will likely be
    replaced with a more modular approach.

    Fixes vs. previous version: removed a leftover debug `print(best)`, and
    the no-CDS branch now actually selects the *longest* transcript (it
    previously sorted ascending and took the first, i.e. the shortest,
    contradicting its own comment).
    """
    import pyfaidx

    fasta = pyfaidx.Fasta(fasta_filename, as_raw=False)
    for gene in db.features_of_type("gene"):
        # exon_list will contain (CDS_length, total_length, transcript,
        # [exons]) tuples.
        exon_list = []
        for ti, transcript in enumerate(db.children(gene, level=1)):
            cds_len = 0
            total_len = 0
            exons = list(db.children(transcript, level=1))
            for exon in exons:
                exon_length = len(exon)
                if exon.featuretype == "CDS":
                    cds_len += exon_length
                total_len += exon_length
            # For coding transcripts keep only CDS/UTR children; for
            # non-coding ones keep all exons.
            if cds_len == 0:
                keep = exons
            else:
                keep = [
                    e
                    for e in exons
                    if e.featuretype in ["CDS", "five_prime_UTR", "three_prime_UTR"]
                ]
            exon_list.append((cds_len, total_len, transcript, keep))
        # If we have CDS, then use the longest coding transcript
        if max(i[0] for i in exon_list) > 0:
            best = max(exon_list, key=lambda x: x[0])
        # Otherwise, just choose the longest transcript
        else:
            best = max(exon_list, key=lambda x: x[1])
        canonical_exons = best[-1]
        transcript = best[-2]
        # Splice exon sequences together in transcription order (reversed for
        # minus-strand transcripts).
        seqs = [
            i.sequence(fasta)
            for i in sorted(
                canonical_exons,
                key=lambda x: x.start,
                reverse=transcript.strand != "+",
            )
        ]
        yield transcript, "".join(seqs)
##
## Helpers for gffutils-cli
##
## TODO: move clean_gff here?
##
def get_gff_db(gff_fname, ext=".db"):
    """
    Get db for GFF file. If the GFF file has a pre-built ".db" file alongside
    it, load that. Otherwise, create a named temporary file, serialize the db
    to that, and return the loaded database.

    Parameters
    ----------
    gff_fname : str
        Path to an existing GFF file.
    ext : str
        Extension (including the leading dot) appended to `gff_fname` when
        looking for a pre-built database.

    Returns
    -------
    gffutils.FeatureDB

    Fixes vs. previous version:
    - The candidate filename was built with "%s.%s", which with the default
      ext=".db" produced a doubled dot (e.g. "foo.gff..db"), so an existing
      database was never found.
    - When the candidate existed, the *filename string* was returned while all
      other code paths (and callers such as sanitize_gff_file) expect
      a FeatureDB object; it is now loaded and returned as a FeatureDB.
    """
    if not os.path.isfile(gff_fname):
        # Not sure how we should deal with errors normally in
        # gffutils -- Ryan?
        raise ValueError("GFF %s does not exist." % (gff_fname))
    candidate_db_fname = "%s%s" % (gff_fname, ext)
    if os.path.isfile(candidate_db_fname):
        # Standard .db file found, so load and return it
        return gffutils.FeatureDB(candidate_db_fname)
    # Otherwise, we need to create a temporary but non-deleted
    # file to store the db in. It'll be up to the user
    # of the function the delete the file when done.
    ## NOTE: Ryan must have a good scheme for dealing with this
    ## since pybedtools does something similar under the hood, i.e.
    ## creating temporary files as needed without over proliferation
    db_fname = tempfile.NamedTemporaryFile(delete=False)
    # Create the database for the gff file (suppress output
    # when using function internally)
    print("Creating db for %s" % (gff_fname))
    t1 = time.time()
    db = gffutils.create_db(
        gff_fname, db_fname.name, merge_strategy="merge", verbose=False
    )
    t2 = time.time()
    print(" - Took %.2f seconds" % (t2 - t1))
    return db
| mit | 3d71b02489738eea4918c6b674087f7b | 30.20643 | 174 | 0.588408 | 3.816846 | false | false | false | false |
daler/gffutils | gffutils/convert.py | 1 | 1389 | """
Conversion functions that operate on :class:`FeatureDB` classes.
"""
import six
def to_bed12(f, db, child_type="exon", name_field="ID"):
    """
    Given a top-level feature (e.g., transcript), construct a BED12 entry

    Parameters
    ----------
    f : Feature object or string
        This is the top-level feature represented by one BED12 line. For
        a canonical GFF or GTF, this will generally be a transcript.

    db : a FeatureDB object
        This is need to get the children for the feature

    child_type : str
        Featuretypes that will be represented by the BED12 "blocks". Typically
        "exon".

    name_field : str
        Attribute to be used in the "name" field of the BED12 entry. Usually
        "ID" for GFF; "transcript_id" for GTF.
    """
    # Allow a feature ID string to be passed instead of a Feature
    if isinstance(f, six.string_types):
        f = db[f]
    blocks = list(db.children(f, featuretype=child_type, order_by="start"))
    block_sizes = [len(child) for child in blocks]
    # Block starts are relative to the parent feature's start
    block_starts = [child.start - f.start for child in blocks]
    name = f.attributes.get(name_field, ["."])[0]
    fields = [
        f.chrom,
        f.start - 1,  # GTF -> BED coord system
        f.stop,
        name,
        f.score,
        f.strand,
        f.start,  # thickStart
        f.stop,  # thickEnd
        "0,0,0",  # itemRgb
        len(blocks),
        ",".join(map(str, block_sizes)),
        ",".join(map(str, block_starts)),
    ]
    return "\t".join(map(str, fields)) + "\n"
| mit | a2ebf2067cc09c8d6168b3312584fadd | 30.568182 | 79 | 0.579554 | 3.507576 | false | false | false | false |
daler/gffutils | gffutils/test/test_issues.py | 1 | 15046 | """
Tests for specific issues and pull requests
"""
import os
import tempfile
import difflib
from textwrap import dedent
import gffutils
from gffutils import feature
from gffutils import merge_criteria as mc
from nose.tools import assert_raises
def test_issue_79():
    """
    A db built with keep_order=True must reproduce the input GTF's first
    line verbatim (attribute order preserved).
    """
    gtf = gffutils.example_filename("keep-order-test.gtf")
    db = gffutils.create_db(
        gtf,
        "tmp.db",
        disable_infer_genes=False,
        disable_infer_transcripts=False,
        id_spec={"gene": "gene_id", "transcript": "transcript_id"},
        merge_strategy="create_unique",
        keep_order=True,
        force=True,
    )
    exp = open(gtf).read()
    obs = "\n".join([str(i) for i in db.all_features()])
    # Only the first line is compared; the prints give a diff for debugging
    # on failure.
    exp_1 = exp.splitlines(True)[0].strip()
    obs_1 = obs.splitlines(True)[0].strip()
    print("EXP")
    print(exp_1)
    print("OBS")
    print(obs_1)
    print("DIFF")
    print("".join(difflib.ndiff([exp_1], [obs_1])))
    assert obs_1 == exp_1
def test_issue_82():
    """
    An attribute value containing the key-val separator ("=") must be parsed
    intact rather than split.
    """
    # key-val separator is inside an unquoted attribute value
    x = (
        "Spenn-ch12\tsgn_markers\tmatch\t2621812\t2622049\t.\t+\t.\t"
        "Alias=SGN-M1347;ID=T0028;Note=marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126"
    )
    y = feature.feature_from_line(x)
    assert y.attributes["Note"] == [
        "marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126"
    ]
    gffutils.create_db(gffutils.example_filename("keyval_sep_in_attrs.gff"), ":memory:")
def test_issue_85():
    """
    Blank or "." start/stop fields must not raise when parsing a line.
    """
    # when start or stop was empty, #85 would fail Should now work with
    # blank fields
    f = feature.feature_from_line("\t".join([""] * 9))
    # or with "." placeholders
    f = feature.feature_from_line("\t".join(["."] * 9))
def test_issue_105():
    """
    DataIterator should handle a file located in the user's home directory.
    """
    fn = gffutils.example_filename("FBgn0031208.gtf")
    home = os.path.expanduser("~")
    newfn = os.path.join(home, ".gffutils.test")
    with open(newfn, "w") as fout:
        fout.write(open(fn).read())
    f = gffutils.iterators.DataIterator(newfn)
    # Just iterating should not raise
    for i in f:
        pass
    os.unlink(newfn)
def test_issue_107():
    """
    interfeatures() must not create features spanning two different
    chromosomes; one interfeature per same-seqid adjacent pair.
    """
    s = dedent(
        """
        chr1\t.\tgene\t10\t15\t.\t+\t.\tID=b;
        chr1\t.\tgene\t1\t5\t.\t-\t.\tID=a;
        chr2\t.\tgene\t25\t50\t.\t-\t.\tID=c;
        chr2\t.\tgene\t55\t60\t.\t-\t.\tID=d;
        """
    )
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    with open(tmp, "w") as fout:
        fout.write(s + "\n")
    db = gffutils.create_db(tmp, ":memory:")
    interfeatures = list(
        db.interfeatures(db.features_of_type("gene", order_by=("seqid", "start")))
    )
    assert [str(i) for i in interfeatures] == [
        "chr1\tgffutils_derived\tinter_gene_gene\t6\t9\t.\t.\t.\tID=a,b;",
        "chr2\tgffutils_derived\tinter_gene_gene\t51\t54\t.\t-\t.\tID=c,d;",
    ]
def test_issue_119():
    """
    Autoincrement counters for ID-less features must be shared through the
    underlying database file and updated consistently across update() calls.
    """
    # First file has these two exons with no ID:
    #
    # chr2L FlyBase exon 8193 8589 . + . Parent=FBtr0300690
    # chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690
    #
    db0 = gffutils.create_db(gffutils.example_filename("FBgn0031208.gff"), ":memory:")
    # And this one, a bunch of reads with no IDs anywhere
    db1 = gffutils.create_db(
        gffutils.example_filename("F3-unique-3.v2.gff"), ":memory:"
    )
    # When db1 is updated by db0
    db2 = db1.update(db0)
    assert (
        db2._autoincrements == db1._autoincrements == {"exon": 2, "read": 112}
    ), db2._autoincrements
    assert len(list(db0.features_of_type("exon"))) == 6
    # Now we update that with db0 again
    db3 = db2.update(db0, merge_strategy="replace")
    # Using the "replace" strategy, we should have only gotten another 2 exons
    assert len(list(db3.features_of_type("exon"))) == 8
    # Make sure that the autoincrements for exons jumped by 2
    assert (
        db2._autoincrements == db3._autoincrements == {"exon": 4, "read": 112}
    ), db2._autoincrements
    # More isolated test, merging two databases each created from the same file
    # which itself contains only a single feature with no ID.
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    with open(tmp, "w") as fout:
        fout.write("chr1\t.\tgene\t10\t15\t.\t+\t.\t\n")
    db4 = gffutils.create_db(tmp, tmp + ".db")
    db5 = gffutils.create_db(tmp, ":memory:")
    assert db4._autoincrements == {"gene": 1}
    assert db5._autoincrements == {"gene": 1}
    db6 = db4.update(db5)
    db7 = gffutils.FeatureDB(db4.dbfn)
    # both db4 and db6 should now have the same, updated autoincrements because
    # they both point to the same db.
    assert db6._autoincrements == db4._autoincrements == {"gene": 2}
    # But db5 was created independently and should have unchanged autoincrements
    assert db5._autoincrements == {"gene": 1}
    # db7 was created from the database pointed to by both db4 and db6. This
    # tests that when a FeatureDB is created it should have the
    # correctly-updated autoincrements read from the db
    assert db7._autoincrements == {"gene": 2}
def test_pr_131():
    """
    update() with an empty iterable should be a no-op rather than raising.
    """
    db = gffutils.create_db(gffutils.example_filename("FBgn0031208.gff"), ":memory:")
    # previously would raise ValueError("No lines parsed -- was an empty
    # file provided?"); now just does nothing
    db2 = db.update([])
def test_pr_133():
    """
    merge_attributes must not mutate either of its input dictionaries.
    """
    # Previously, merge_attributes would not deep-copy the values from the
    # second dict, and when the values are then modified, the second dict is
    # unintentionally modified.
    d1 = {"a": [1]}
    d2 = {"a": [2]}
    d1a = {"a": [1]}
    d2a = {"a": [2]}
    d3 = gffutils.helpers.merge_attributes(d1, d2)
    assert d1 == d1a, d1
    assert d2 == d2a, d2
def test_pr_139():
    """
    interfeatures() must not leak the first feature's attributes into
    subsequent merged features.
    """
    db = gffutils.create_db(gffutils.example_filename("FBgn0031208.gff"), ":memory:")
    exons = list(db.features_of_type("exon"))
    inter = list(db.interfeatures(exons))
    # previously, the first exon's attributes would show up in subsequent merged features
    first_name = exons[0].attributes["Name"][0]
    for i in inter[1:]:
        if "Name" in i.attributes:
            assert first_name not in i.attributes["Name"], str(i)
def test_pr_144():
    """
    Empty attribute values must round-trip through Feature string
    serialization and feature_from_line.
    """
    # previously this would fail with:
    # UnboundLocalError: local variable 'part' referenced before assignment
    f = gffutils.Feature(attributes={"a": [""]})
    # Make sure everything got converted correctly
    assert f.attributes["a"] == [""]
    assert str(f) == ".	.	.	.	.	.	.	.	a"
    g = gffutils.feature.feature_from_line(str(f))
    assert g == f
def test_pr_172():
    """
    A GTF line with an empty transcript_id attribute must not break db
    creation.
    """
    line = (
        "NC_049222.1\tGnomon\tgene\t209085\t282880\t.\t-\t.\t"
        'gene_id "ENPP1_3"; transcript_id ""; db_xref "GeneID:100856150";'
        'db_xref "VGNC:VGNC:40374"; gbkey "Gene"; gene "ENPP1"; '
        'gene_biotype "protein_coding";\n'
    )
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    with open(tmp, "w") as fout:
        fout.write(line)
    db = gffutils.create_db(tmp, ":memory:")
def test_pr_171():
    """
    Quoter.__missing__ percent-encodes characters that need escaping and
    passes everything else through unchanged (including the empty string).
    """
    q = gffutils.parser.Quoter()
    assert q.__missing__("\n") == "%0A"
    assert q.__missing__("a") == "a"
    assert q.__missing__("") == ""
def test_issue_129():
    """
    region() queries that merely touch a feature's boundaries must return
    the feature (fixed via #162); queries entirely outside must not.
    """
    # thanks @Brunox13 for the detailed notes on #129
    line = 'chr1\tdemo\tstart_codon\t69091\t69093\t.\t+\t.\tgene_id "demo";\n'
    tmp = tempfile.NamedTemporaryFile(delete=False).name
    with open(tmp, "w") as fout:
        fout.write(line)
    db = gffutils.create_db(tmp, ":memory:")
    # ASCII art to visualize each test (coords are along the top, from 69087 to
    # 69090). The tests slide a 4-bp region over the original 3-bp start codon.
    #  7  8  9  0  1  2  3  4  5  6  7
    #              |  |  |              Orig feature
    #  |  |  |  |                       Test feature
    res = list(db.region(region=("chr1", 69087, 69090), featuretype="start_codon"))
    assert len(res) == 0
    # NOTE: prior to #162, this did not return anything
    #  7  8  9  0  1  2  3  4  5  6  7
    #              |  |  |              Orig feature
    #     |  |  |  |                    Test feature
    res = list(db.region(region=("chr1", 69088, 69091), featuretype="start_codon"))
    assert len(res) == 1
    #  7  8  9  0  1  2  3  4  5  6  7
    #              |  |  |              Orig feature
    #        |  |  |  |                 Test feature
    res = list(db.region(region=("chr1", 69089, 69092), featuretype="start_codon"))
    assert len(res) == 1
    #  7  8  9  0  1  2  3  4  5  6  7
    #              |  |  |              Orig feature
    #           |  |  |  |              Test feature
    res = list(db.region(region=("chr1", 69090, 69093), featuretype="start_codon"))
    assert len(res) == 1
    #  7  8  9  0  1  2  3  4  5  6  7
    #              |  |  |              Orig feature
    #              |  |  |  |           Test feature
    res = list(db.region(region=("chr1", 69091, 69094), featuretype="start_codon"))
    assert len(res) == 1
    #  7  8  9  0  1  2  3  4  5  6  7
    #              |  |  |              Orig feature
    #                 |  |  |  |        Test feature
    res = list(db.region(region=("chr1", 69092, 69095), featuretype="start_codon"))
    assert len(res) == 1
    # NOTE: prior to #162, this did not return anything
    #  7  8  9  0  1  2  3  4  5  6  7
    #              |  |  |              Orig feature
    #                    |  |  |  |     Test feature
    res = list(db.region(region=("chr1", 69093, 69096), featuretype="start_codon"))
    assert len(res) == 1
    #  7  8  9  0  1  2  3  4  5  6  7
    #              |  |  |              Orig feature
    #                       |  |  |  |  Test feature
    res = list(db.region(region=("chr1", 69094, 69097), featuretype="start_codon"))
    assert len(res) == 0
def test_issue_128():
    """
    Dialect detection must weight longer attribute strings more heavily so
    that the correct "; " separator wins over ";".
    """
    # In #128, some lines had separators of "; " and some with ";". The first
    # one in the file would win. Now the detection pays more attention to lines
    # with more attributes to make it work properly
    gff = gffutils.example_filename('gms2_example.gff3')
    db = gffutils.create_db(gff, ":memory:", force=True)
    expected = {
        'ID': ['1'],
        'Parent': ['gene_1'],
        'gene_type': ['native'],
        'partial': ['11'],
        'gc': ['33'],
        'length': ['363'],
    }
    assert dict(db['1'].attributes) == expected
def test_issue_157():
    """
    children_bp() must reject the removed ignore_strand argument with a
    helpful ValueError and support the new merge_criteria mechanism.
    """
    # With the merge overhaul, children_bp incorrectly still used ignore_strand.
    db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'), ":memory:")
    gene = next(db.features_of_type('gene'))
    children = list(db.children(gene, featuretype='exon'))
    # Modify the last one to have a different strand so we can test the
    # ignore_strand argument.
    children[-1].strand = '-'
    db.update(children[-1:], merge_strategy='replace')
    # and, since updating has been problematic in the past, double-check again
    # that the strand is changed in the db.
    assert list(db.children(gene, featuretype='exon'))[-1].strand == '-'
    cbp1 = db.children_bp(gene, child_featuretype='exon')
    # Previously this would give:
    #   TypeError: merge() got an unexpected keyword argument 'ignore_strand'
    #
    # Now changing to ValueError and suggesting a fix.
    assert_raises(ValueError, db.children_bp, gene, child_featuretype='exon', merge=True, ignore_strand=True)
    assert_raises(ValueError, db.children_bp, gene, ignore_strand=True, nonexistent=True)
    assert_raises(TypeError, db.children_bp, gene, nonexistent=True)
    # The way to do it now is the following (we can omit the mc.feature_type
    # since we're preselecting for exons anyway):
    db.children_bp(gene, child_featuretype='exon', merge=True, merge_criteria=(mc.overlap_end_inclusive))
def test_issue_159():
    """
    canonical_transcripts() should run without error on the example data.
    """
    db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'), ":memory:")
    fasta = gffutils.example_filename('dm6-chr2L.fa')
    for transcript, seq in gffutils.helpers.canonical_transcripts(db, fasta):
        pass
def test_issue_164():
    """
    Creating a db from this previously-problematic GTF should not raise.
    """
    # Something strange with the original annotation, but seems fine at least
    # after pasting in the offending genes from the GitHub comments.
    db = gffutils.create_db(
        gffutils.example_filename('sharr.gtf'),
        ':memory:',
        disable_infer_transcripts=True,
        disable_infer_genes=True,
        id_spec={'gene': 'gene_id', 'transcript': 'transcript_id'},
        merge_strategy='create_unique',
        keep_order=True)
def test_issue_166():
    """
    FeatureDB.seqids() should list the distinct seqids in the database.
    """
    # Added the new FeatureDB.seqids() method.
    db = gffutils.create_db(gffutils.example_filename('nonascii'), ':memory:')
    seqs = list(db.seqids())
    assert seqs == ['2L', '2R', '3L', '3R', 'X'], seqs
def test_issue_167():
    """
    Creating a db from this file must not raise sqlite3.InterfaceError.
    """
    # Previously was causing sqlite3.InterfaceError
    db = gffutils.create_db(gffutils.example_filename('issue167.gff'), ':memory:')
def test_issue_174():
    """
    create_introns() exon_number merging: by default values are merged as
    sorted strings; numeric_sort=True sorts them numerically instead.
    """
    db = gffutils.create_db(
        gffutils.example_filename('issue174.gtf'),
        ':memory:',
        merge_strategy='warning',
    )
    introns = [f for f in db.create_introns()]
    observed = [i.attributes['exon_number'] for i in introns]
    assert observed[7] == ['8', '9']
    # String sort puts '10' before '9'
    assert observed[8] == ['10', '9']
    assert observed[9] == ['10', '11']
    # Now do the same thing, but with the new numeric_sort arg
    introns = [f for f in db.create_introns(numeric_sort=True)]
    observed = [i.attributes['exon_number'] for i in introns]
    assert observed[7] == ['8', '9']
    # This should be fixed:
    assert observed[8] == ['9', '10']
    assert observed[9] == ['10', '11']
def test_issue_181():
    """
    update() must raise when the configured ID key has multiple values; a
    custom id_spec callable is the documented workaround.
    """
    db = gffutils.create_db(
        gffutils.example_filename('issue181.gff'),
        ':memory:')
    introns = db.create_introns()
    # This now warns that the provided ID key has multiple values.
    assert_raises(ValueError, db.update, introns)
    # The fix is to provide a custom intron ID converter.
    def intron_id(f):
        return ','.join(f['ID'])
    db.update(introns, id_spec={'intron': [intron_id]})
def test_issue_197():
    """
    interfeatures() must not carry the previous chromosome's stop position
    into the first interfeature of the next chromosome.
    """
    # Previously this would fail with ValueError due to using the stop position
    # of the last item on the previous chrom as the start position.
    db = gffutils.create_db(gffutils.example_filename('issue_197.gff'), ':memory:', merge_strategy='error')
    genes = list(db.features_of_type('gene'))
    igss = list( db.interfeatures(genes,new_featuretype='intergenic_space') )
    def transform(f):
        # Collapse the multi-valued ID of an interfeature into a single ID
        f['ID'] = [ '-'.join(f.attributes['ID']) ]
        return f
    db = db.update(igss, transform=transform, merge_strategy='error')
    obs = list(db.features_of_type('intergenic_space'))
    for i in obs:
        print(i)
    assert [str(i) for i in obs] == [
        'tig00000492\tgffutils_derived\tintergenic_space\t47236\t47350\t.\t-\t.\tID=ctg492.gene0001-ctg492.gene0002;Name=gene0001,gene0002',
        'tig00000492\tgffutils_derived\tintergenic_space\t48257\t49999\t.\t-\t.\tID=ctg492.gene0002-gene0;Name=gene0002',
        'tig00000492\tgffutils_derived\tintergenic_space\t50050\t50054\t.\t-\t.\tID=gene3-gene4',
        'tig00000492\tgffutils_derived\tintergenic_space\t50071\t50071\t.\t-\t.\tID=gene4-gene5',
        'tig00000492\tgffutils_derived\tintergenic_space\t50076\t50089\t.\t-\t.\tID=gene5-gene6',
    ]
| mit | 40df47b16299a4a0c3b50b7f7095fc5a | 34.072261 | 140 | 0.61724 | 3.114469 | false | true | false | false |
daler/gffutils | gffutils/create.py | 1 | 54349 | import copy
import warnings
import collections
import tempfile
import sys
import os
import sqlite3
import six
from textwrap import dedent
from gffutils import constants
from gffutils import version
from gffutils import bins
from gffutils import helpers
from gffutils import feature
from gffutils import interface
from gffutils import iterators
from gffutils.exceptions import EmptyInputError
import logging
# Module-level logger: emits timestamped messages at DEBUG and above to the
# default stream handler (stderr).
formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
def deprecation_handler(kwargs):
    """
    As things change from version to version, deal with them here.

    Raises TypeError if any unhandled kwargs remain.
    """
    # After reconsidering, let's leave `infer_gene_extent` for another release.
    # When it's time to deprecate it, re-enable this check:
    if False:
        if "infer_gene_extent" in kwargs:
            raise ValueError(
                "'infer_gene_extent' is deprecated as of version 0.8.4 in "
                "favor of more granular control over inferring genes and/or "
                "transcripts. The previous default was "
                "'infer_gene_extent=True`, which corresponds to the new "
                "defaults "
                "'disable_infer_genes=False' and "
                "'disable_infer_transcripts=False'. Please see the docstring "
                "for gffutils.create_db for details."
            )
    if kwargs:
        raise TypeError("unhandled kwarg in %s" % kwargs)
class _DBCreator(object):
    """
    Base class for building a gffutils sqlite3 database.

    Subclasses (_GFFDBCreator, _GTFDBCreator) implement the format-specific
    ``_populate_from_lines`` and ``_update_relations`` methods; this class
    handles connection setup, primary-key assignment (``id_spec``), duplicate
    merge strategies, and the final indexing/metadata steps.  See create_db()
    for full parameter documentation.
    """

    def __init__(
        self,
        data,
        dbfn,
        force=False,
        verbose=False,
        id_spec=None,
        merge_strategy="error",
        checklines=10,
        transform=None,
        force_dialect_check=False,
        from_string=False,
        dialect=None,
        default_encoding="utf-8",
        disable_infer_genes=False,
        disable_infer_transcripts=False,
        infer_gene_extent=True,
        force_merge_fields=None,
        text_factory=str,
        pragmas=constants.default_pragmas,
        _keep_tempfiles=False,
        directives=None,
        **kwargs
    ):
        """
        Base class for _GFFDBCreator and _GTFDBCreator; see create_db()
        function for docs
        """
        self._keep_tempfiles = _keep_tempfiles

        if force_merge_fields is None:
            force_merge_fields = []

        if merge_strategy == "merge":
            # start/end are stored as integers and so cannot be merged into
            # comma-separated strings like other fields.
            if set(["start", "end"]).intersection(force_merge_fields):
                raise ValueError(
                    "Can't merge start/end fields since " "they must be integers"
                )
            warn = set(force_merge_fields).intersection(["frame", "strand"])
            for w in warn:
                warnings.warn(
                    "%s field will be merged for features with the same ID; "
                    "this may result in unusable features." % w
                )

        self.force_merge_fields = force_merge_fields
        self.pragmas = pragmas
        self.merge_strategy = merge_strategy
        self.default_encoding = default_encoding
        if directives is None:
            directives = []
        self.directives = directives

        # Translate the legacy `infer_gene_extent` kwarg into the newer,
        # more granular flags.
        if not infer_gene_extent:
            warnings.warn(
                "'infer_gene_extent' will be deprecated. For now, "
                "the following equivalent values were automatically "
                "set: 'disable_infer_genes=True', "
                "'disable_infer_transcripts=True'. Please use these "
                "instead in the future."
            )
            disable_infer_genes = True
            disable_infer_transcripts = True
        self.disable_infer_genes = disable_infer_genes
        self.disable_infer_transcripts = disable_infer_transcripts

        if force:
            if os.path.exists(dbfn):
                os.unlink(dbfn)

        self.dbfn = dbfn
        self.id_spec = id_spec

        # `dbfn` may be a filename or an already-open connection object.
        if isinstance(dbfn, six.string_types):
            conn = sqlite3.connect(dbfn)
        else:
            conn = dbfn
        self.conn = conn
        self.conn.row_factory = sqlite3.Row

        self.set_verbose(verbose)

        if text_factory is not None:
            logger.debug("setting text factory to %s" % text_factory)
            self.conn.text_factory = text_factory
        self._data = data

        self._orig_logger_level = logger.level

        self.iterator = iterators.DataIterator(
            data=data,
            checklines=checklines,
            transform=transform,
            force_dialect_check=force_dialect_check,
            from_string=from_string,
            dialect=dialect,
        )

        # keys are featuretypes, values are integers. Results in unique,
        # derived feature IDs like "exon_94".
        if "_autoincrements" in kwargs:
            self._autoincrements = kwargs["_autoincrements"]
        else:
            self._autoincrements = collections.defaultdict(int)

    def set_verbose(self, verbose=None):
        """
        Adjust the module logger level: "debug" -> DEBUG, truthy -> INFO,
        falsy -> ERROR.
        """
        if verbose == "debug":
            logger.setLevel(logging.DEBUG)
        elif verbose:
            logger.setLevel(logging.INFO)
        else:
            logger.setLevel(logging.ERROR)
        self.verbose = verbose

    def _increment_featuretype_autoid(self, key):
        """
        Return the next auto-incremented ID of the form "<key>_<n>".
        """
        self._autoincrements[key] += 1
        return "%s_%s" % (key, self._autoincrements[key])

    def _id_handler(self, f):
        """
        Given a Feature from self.iterator, figure out what the ID should be.

        This uses `self.id_spec` to identify the ID.
        """
        # If id_spec is a string or callable, convert to iterable for later
        if isinstance(self.id_spec, six.string_types):
            id_key = [self.id_spec]

        elif hasattr(self.id_spec, "__call__"):
            id_key = [self.id_spec]

        # If dict, then assume it's a feature -> attribute mapping, e.g.,
        # {'gene': 'gene_id'} for GTF
        elif isinstance(self.id_spec, dict):
            try:
                id_key = self.id_spec[f.featuretype]
                if isinstance(id_key, six.string_types):
                    id_key = [id_key]

            # Otherwise, use default auto-increment.
            except KeyError:
                return self._increment_featuretype_autoid(f.featuretype)

        # Otherwise assume it's an iterable.
        else:
            id_key = self.id_spec

        # Then try them in order, returning the first one that works:
        for k in id_key:

            if hasattr(k, "__call__"):
                _id = k(f)
                if _id:
                    if _id.startswith("autoincrement:"):
                        return self._increment_featuretype_autoid(_id[14:])
                    return _id

            else:
                # use GFF fields rather than attributes for cases like :seqid:
                # or :strand:
                if (len(k) > 3) and (k[0] == ":") and (k[-1] == ":"):
                    # No trailing [0] here to get first item -- only attributes
                    # key/vals are forced into lists, not standard GFF fields
                    # like seqid or strand.
                    return getattr(f, k[1:-1])
                else:
                    try:
                        # Multi-valued attributes cannot be used as a primary
                        # key; fail loudly rather than silently picking one.
                        if len(f.attributes[k]) > 1:
                            raise ValueError(
                                "The ID field {} has more than one value but "
                                "a single value is required for a primary key in the "
                                "database. Consider using a custom id_spec to "
                                "convert these multiple values into a single "
                                "value".format(k))
                    except KeyError:
                        pass
                    try:
                        return f.attributes[k][0]
                    except (KeyError, IndexError):
                        pass

        # If we get here, then default autoincrement
        return self._increment_featuretype_autoid(f.featuretype)

    def _get_feature(self, ID):
        """
        Fetch the feature with primary key `ID` from the db as a Feature.
        """
        c = self.conn.cursor()
        results = c.execute(constants._SELECT + " WHERE id = ?", (ID,)).fetchone()
        return feature.Feature(dialect=self.iterator.dialect, **results)

    def _do_merge(self, f, merge_strategy, add_duplicate=False):
        """
        Different merge strategies upon name conflicts.

        "error":
            Raise error

        "warning"
            Log a warning, which indicates that all future instances of the
            same ID will be ignored

        "merge":
            Combine old and new attributes -- but only if everything else
            matches; otherwise error. This can be slow, but is thorough.

        "create_unique":
            Autoincrement based on the ID, always creating a new ID.

        "replace":
            Replaces existing database feature with `f`.
        """
        if merge_strategy == "error":
            raise ValueError("Duplicate ID {0.id}".format(f))
        if merge_strategy == "warning":
            logger.warning(
                "Duplicate lines in file for id '{0.id}'; "
                "ignoring all but the first".format(f)
            )
            return None, merge_strategy
        elif merge_strategy == "replace":
            return f, merge_strategy

        # This is by far the most complicated strategy.
        elif merge_strategy == "merge":

            # Recall that if we made it to this method, there was at least one
            # ID collision.

            # This will eventually contain the features that match ID AND that
            # match non-attribute fields like start, stop, strand, etc.
            features_to_merge = []

            # Iterate through all features that have the same ID according to
            # the id_spec provided.
            if self.verbose == "debug":
                logger.debug(
                    "candidates with same idspec: %s"
                    % ([i.id for i in self._candidate_merges(f)])
                )

            # If force_merge_fields was provided, don't check them even if
            # they're different. We are assuming the attributes field will be
            # different, hence the [:-1]
            _gffkeys_to_check = list(
                set(constants._gffkeys[:-1]).difference(self.force_merge_fields)
            )

            for existing_feature in self._candidate_merges(f):
                # Check other GFF fields (if not specified in
                # self.force_merge_fields) to make sure they match.
                other_attributes_same = True
                for k in _gffkeys_to_check:
                    if getattr(existing_feature, k) != getattr(f, k):
                        other_attributes_same = False
                        break

                if other_attributes_same:
                    # All the other GFF fields match. So this existing feature
                    # should be merged.
                    features_to_merge.append(existing_feature)
                    logger.debug(
                        "same attributes between:\nexisting: %s"
                        "\nthis    : %s" % (existing_feature, f)
                    )
                else:
                    # The existing feature's GFF fields don't match, so don't
                    # append anything.
                    logger.debug(
                        "different attributes between:\nexisting: %s\n"
                        "this    : %s" % (existing_feature, f)
                    )

            if len(features_to_merge) == 0:
                # No merge candidates found, so we should make a new ID for
                # this feature. This can happen when idspecs match, but other
                # fields (like start/stop) are different. Call this method
                # again, but using the "create_unique" strategy, and then
                # record the newly-created ID in the duplicates table.
                orig_id = f.id
                uniqued_feature, merge_strategy = self._do_merge(
                    f, merge_strategy="create_unique"
                )
                self._add_duplicate(orig_id, uniqued_feature.id)
                return uniqued_feature, merge_strategy

            # Whoo! Found some candidates to merge.
            else:
                logger.debug("num candidates: %s" % len(features_to_merge))

                # This is the attributes dictionary we'll be modifying.
                merged_attributes = copy.deepcopy(f.attributes)

                # Keep track of non-attribute fields (this will be an empty
                # dict if no force_merge_fields)
                final_fields = dict(
                    [
                        (field, set([getattr(f, field)]))
                        for field in self.force_merge_fields
                    ]
                )

                # Update the attributes
                for existing_feature in features_to_merge:
                    logger.debug("\nmerging\n\n%s\n%s\n" % (f, existing_feature))
                    for k in existing_feature.attributes.keys():
                        v = merged_attributes.setdefault(k, [])
                        v.extend(existing_feature[k])
                        merged_attributes[k] = v

                    # Update the set of non-attribute fields found so far
                    for field in self.force_merge_fields:
                        final_fields[field].update([getattr(existing_feature, field)])

                # Set the merged attributes (de-duplicated per key)
                for k, v in merged_attributes.items():
                    merged_attributes[k] = list(set(v))
                existing_feature.attributes = merged_attributes

                # Set the final merged non-attributes
                for k, v in final_fields.items():
                    setattr(existing_feature, k, ",".join(sorted(map(str, v))))

                logger.debug("\nMERGED:\n%s" % existing_feature)
                return existing_feature, merge_strategy

        elif merge_strategy == "create_unique":
            f.id = self._increment_featuretype_autoid(f.id)
            return f, merge_strategy
        else:
            raise ValueError("Invalid merge strategy '%s'" % (merge_strategy))

    def _add_duplicate(self, idspecid, newid):
        """
        Adds a duplicate ID (as identified by id_spec) and its new ID to the
        duplicates table so that they can be later searched for merging.

        Parameters
        ----------

        newid : str
            The primary key used in the features table

        idspecid : str
            The ID identified by id_spec
        """
        c = self.conn.cursor()
        try:
            c.execute(
                """
                INSERT INTO duplicates
                (idspecid, newid)
                VALUES (?, ?)""",
                (idspecid, newid),
            )
        except sqlite3.ProgrammingError:
            # Fall back to explicit decoding (e.g., bytes under Python 2).
            c.execute(
                """
                INSERT INTO duplicates
                (idspecid, newid)
                VALUES (?, ?)""",
                (
                    idspecid.decode(self.default_encoding),
                    newid.decode(self.default_encoding),
                ),
            )
        logger.debug("added id=%s; new=%s" % (idspecid, newid))
        self.conn.commit()

    def _candidate_merges(self, f):
        """
        Identifies those features that originally had the same ID as `f`
        (according to the id_spec), but were modified because of duplicate
        IDs.
        """
        candidates = [self._get_feature(f.id)]
        c = self.conn.cursor()
        results = c.execute(
            constants._SELECT
            + """
            JOIN duplicates ON
            duplicates.newid = features.id WHERE duplicates.idspecid = ?""",
            (f.id,),
        )
        for i in results:
            candidates.append(feature.Feature(dialect=self.iterator.dialect, **i))
        return list(set(candidates))

    def _populate_from_lines(self, lines):
        # Format-specific; implemented by subclasses.
        raise NotImplementedError

    def _update_relations(self):
        # Format-specific; implemented by subclasses.
        raise NotImplementedError

    def _drop_indexes(self):
        """
        Drop the known indexes (if present) to speed up bulk inserts; they
        are recreated in _finalize().
        """
        c = self.conn.cursor()
        for index in constants.INDEXES:
            # Bug fix: SQLite parameter binding ("?") only works for values,
            # not identifiers, so the previous parameterized form raised an
            # OperationalError.  Interpolating is safe here because
            # constants.INDEXES is a fixed internal list, not user input.
            c.execute("DROP INDEX IF EXISTS %s" % index)
        self.conn.commit()

    def set_pragmas(self, pragmas):
        """
        Set pragmas for the current database connection.

        Parameters
        ----------
        pragmas : dict
            Dictionary of pragmas; see constants.default_pragmas for a template
            and http://www.sqlite.org/pragma.html for a full list.
        """
        self.pragmas = pragmas
        c = self.conn.cursor()
        c.executescript(";\n".join(["PRAGMA %s=%s" % i for i in self.pragmas.items()]))
        self.conn.commit()

    def _init_tables(self):
        """
        Table creation
        """
        c = self.conn.cursor()
        self.set_pragmas(self.pragmas)
        c.executescript(constants.SCHEMA)
        self.conn.commit()

    def _finalize(self):
        """
        Various last-minute stuff to perform after file has been parsed and
        imported.

        In general, if you'll be adding stuff to the meta table, do it here.
        """
        c = self.conn.cursor()
        directives = self.directives
        c.executemany(
            """
            INSERT INTO directives VALUES (?)
            """,
            ((i,) for i in directives),
        )
        c.execute(
            """
            INSERT INTO meta (version, dialect)
            VALUES (:version, :dialect)""",
            dict(
                version=version.version, dialect=helpers._jsonify(self.iterator.dialect)
            ),
        )

        # Persist autoincrement counters so later updates continue numbering.
        c.executemany(
            """
            INSERT OR REPLACE INTO autoincrements VALUES (?, ?)
            """,
            list(self._autoincrements.items()),
        )

        # These indexes are *well* worth the effort and extra storage: over
        # 500x speedup on code like this:
        #
        #   genes = []
        #   for i in db.features_of_type('snoRNA'):
        #       for k in db.parents(i, level=1, featuretype='gene'):
        #           genes.append(k.id)
        #
        logger.info("Creating relations(parent) index")
        c.execute("DROP INDEX IF EXISTS relationsparent")
        c.execute("CREATE INDEX relationsparent ON relations (parent)")
        logger.info("Creating relations(child) index")
        c.execute("DROP INDEX IF EXISTS relationschild")
        c.execute("CREATE INDEX relationschild ON relations (child)")
        logger.info("Creating features(featuretype) index")
        c.execute("DROP INDEX IF EXISTS featuretype")
        c.execute("CREATE INDEX featuretype ON features (featuretype)")
        logger.info("Creating features (seqid, start, end) index")
        c.execute("DROP INDEX IF EXISTS seqidstartend")
        c.execute("CREATE INDEX seqidstartend ON features (seqid, start, end)")
        logger.info("Creating features (seqid, start, end, strand) index")
        c.execute("DROP INDEX IF EXISTS seqidstartendstrand")
        c.execute(
            "CREATE INDEX seqidstartendstrand ON features (seqid, start, end, strand)"
        )

        # speeds computation 1000x in some cases
        logger.info("Running ANALYZE features")
        c.execute("ANALYZE features")

        self.conn.commit()

        self.warnings = self.iterator.warnings

    def create(self):
        """
        Calls various methods sequentially in order to fully build the
        database.
        """
        # Calls each of these methods in order. _populate_from_lines and
        # _update_relations must be implemented in subclasses.
        self._init_tables()
        self._populate_from_lines(self.iterator)
        self._update_relations()
        self._finalize()

    # TODO: not sure this is used anywhere
    def update(self, iterator):
        self._populate_from_lines(iterator)
        self._update_relations()

    def execute(self, query):
        """
        Execute a query directly on the database, yielding each result row.
        """
        c = self.conn.cursor()
        result = c.execute(query)
        for i in result:
            yield i

    def _insert(self, feature, cursor):
        """
        Insert a feature into the database.
        """
        try:
            cursor.execute(constants._INSERT, feature.astuple())
        except sqlite3.ProgrammingError:
            # Retry with explicit encoding (non-ASCII under Python 2).
            cursor.execute(constants._INSERT, feature.astuple(self.default_encoding))

    def _replace(self, feature, cursor):
        """
        Replace the existing database row whose id matches `feature`.
        """
        try:
            cursor.execute(constants._UPDATE, list(feature.astuple()) + [feature.id])
        except sqlite3.ProgrammingError:
            # Bug fix: the encoding fallback previously executed
            # constants._INSERT with UPDATE-shaped parameters; it must run
            # the same UPDATE statement as the try branch.
            cursor.execute(
                constants._UPDATE,
                list(feature.astuple(self.default_encoding)) + [feature.id],
            )
class _GFFDBCreator(_DBCreator):
    def __init__(self, *args, **kwargs):
        """
        _DBCreator subclass specifically for working with GFF files.

        create_db() delegates to this class -- see that function for docs
        """
        super(_GFFDBCreator, self).__init__(*args, **kwargs)

    def _populate_from_lines(self, lines):
        """
        Insert each feature from `lines` into the features table, recording
        level-1 relations from each feature's "Parent" attribute.  Duplicate
        primary keys are resolved via self.merge_strategy.
        """
        c = self.conn.cursor()
        self._drop_indexes()
        last_perc = 0
        logger.info("Populating features")
        msg = "Populating features table and first-order relations: " "%d features\r"
        # c.executemany() was not as much of an improvement as I had expected.
        #
        # Compared to a benchmark of doing each insert separately:
        # executemany using a list of dicts to iterate over is ~15% slower
        # executemany using a list of tuples to iterate over is ~8% faster
        features_seen = None
        _features, _relations = [], []
        for i, f in enumerate(lines):
            features_seen = i
            # Percent complete
            if self.verbose:
                if i % 1000 == 0:
                    sys.stderr.write(msg % i)
                    sys.stderr.flush()
            # TODO: handle ID creation here...should be combined with the
            # INSERT below (that is, don't IGNORE below but catch the error and
            # re-try with a new ID). However, is this doable with an
            # execute-many?
            f.id = self._id_handler(f)
            try:
                self._insert(f, c)
            except sqlite3.IntegrityError:
                # Primary-key collision: resolve according to merge_strategy.
                fixed, final_strategy = self._do_merge(f, self.merge_strategy)
                if final_strategy == "merge":
                    c.execute(
                        """
                        UPDATE features SET attributes = ?
                        WHERE id = ?
                        """,
                        (helpers._jsonify(fixed.attributes), fixed.id),
                    )
                    # For any additional fields we're merging, update those as
                    # well.
                    if self.force_merge_fields:
                        _set_clause = ", ".join(
                            ["%s = ?" % field for field in self.force_merge_fields]
                        )
                        values = [
                            getattr(fixed, field) for field in self.force_merge_fields
                        ] + [fixed.id]
                        c.execute(
                            """
                            UPDATE features SET %s
                            WHERE id = ?
                            """
                            % _set_clause,
                            tuple(values),
                        )
                elif final_strategy == "replace":
                    self._replace(f, c)
                elif final_strategy == "create_unique":
                    self._insert(f, c)
            # Record a first-level relation for each declared Parent.
            if "Parent" in f.attributes:
                for parent in f.attributes["Parent"]:
                    c.execute(
                        """
                        INSERT OR IGNORE INTO relations VALUES
                        (?, ?, 1)
                        """,
                        (parent, f.id),
                    )
        if features_seen is None:
            raise EmptyInputError("No lines parsed -- was an empty file provided?")
        self.conn.commit()
        if self.verbose:
            logger.info(msg % i)

    def _update_relations(self):
        """
        Add level-2 ("grandchild") relations by walking the level-1 relations
        already in the db.  Results are staged in a temp file to avoid
        reading and writing the relations table at the same time.
        """
        logger.info("Updating relations")
        c = self.conn.cursor()
        c2 = self.conn.cursor()
        c3 = self.conn.cursor()
        # TODO: pre-compute indexes?
        # c.execute('CREATE INDEX ids ON features (id)')
        # c.execute('CREATE INDEX parentindex ON relations (parent)')
        # c.execute('CREATE INDEX childindex ON relations (child)')
        # self.conn.commit()
        # A string value of _keep_tempfiles doubles as the tempfile suffix.
        if isinstance(self._keep_tempfiles, six.string_types):
            suffix = self._keep_tempfiles
        else:
            suffix = ".gffutils"
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix).name
        with open(tmp, "w") as fout:
            # Here we look for "grandchildren" -- for each ID, get the child
            # (parenthetical subquery below); then for each of those get *its*
            # child (main query below).
            #
            # Results are written to temp file so that we don't read and write at
            # the same time, which would slow things down considerably.
            c.execute("SELECT id FROM features")
            for parent in c:
                c2.execute(
                    """
                    SELECT child FROM relations WHERE parent IN
                    (SELECT child FROM relations WHERE parent = ?)
                    """,
                    tuple(parent),
                )
                for grandchild in c2:
                    fout.write("\t".join((parent[0], grandchild[0])) + "\n")

        def relations_generator():
            # Stream the staged (parent, grandchild) pairs back as dicts for
            # the executemany below.
            with open(fout.name) as fin:
                for line in fin:
                    parent, child = line.strip().split("\t")
                    yield dict(parent=parent, child=child, level=2)

        c.executemany(
            """
            INSERT OR IGNORE INTO relations VALUES
            (:parent, :child, :level)
            """,
            relations_generator(),
        )
        # TODO: Index creation. Which ones affect performance?
        c.execute("DROP INDEX IF EXISTS binindex")
        c.execute("CREATE INDEX binindex ON features (bin)")
        self.conn.commit()
        if not self._keep_tempfiles:
            os.unlink(fout.name)
class _GTFDBCreator(_DBCreator):
    def __init__(self, *args, **kwargs):
        """
        create_db() delegates to this class -- see that function for docs
        """
        # Attribute keys that define the transcript/gene hierarchy, and the
        # featuretype used to infer transcript/gene extents (GTF defaults).
        self.transcript_key = kwargs.pop("transcript_key", "transcript_id")
        self.gene_key = kwargs.pop("gene_key", "gene_id")
        self.subfeature = kwargs.pop("subfeature", "exon")
        super(_GTFDBCreator, self).__init__(*args, **kwargs)

    def _populate_from_lines(self, lines):
        """
        Insert features from `lines`, recording transcript (level 1) and
        gene (level 2) relations based on the GTF attribute keys.
        """
        msg = "Populating features table and first-order relations: %d " "features\r"
        c = self.conn.cursor()
        # Only check this many features to see if it's a gene or transcript and
        # issue the appropriate warning.
        gene_and_transcript_check_limit = 1000
        last_perc = 0
        lines_seen = 0
        for i, f in enumerate(lines):
            # See issues #48 and #20.
            if lines_seen < gene_and_transcript_check_limit:
                if f.featuretype == "transcript" and not self.disable_infer_transcripts:
                    warnings.warn(
                        "It appears you have a transcript feature in your GTF "
                        "file. You may want to use the "
                        "`disable_infer_transcripts=True` "
                        "option to speed up database creation"
                    )
                elif f.featuretype == "gene" and not self.disable_infer_genes:
                    warnings.warn(
                        "It appears you have a gene feature in your GTF "
                        "file. You may want to use the "
                        "`disable_infer_genes=True` "
                        "option to speed up database creation"
                    )
            lines_seen = i + 1
            # Percent complete
            if self.verbose:
                if i % 1000 == 0:
                    sys.stderr.write(msg % i)
                    sys.stderr.flush()
            f.id = self._id_handler(f)
            # Insert the feature itself...
            try:
                self._insert(f, c)
            except sqlite3.IntegrityError:
                # Primary-key collision: resolve per merge_strategy.
                fixed, final_strategy = self._do_merge(f, self.merge_strategy)
                if final_strategy == "merge":
                    c.execute(
                        """
                        UPDATE features SET attributes = ?
                        WHERE id = ?
                        """,
                        (helpers._jsonify(fixed.attributes), fixed.id),
                    )
                    # For any additional fields we're merging, update those as
                    # well.
                    if self.force_merge_fields:
                        _set_clause = ", ".join(
                            ["%s = ?" % field for field in self.force_merge_fields]
                        )
                        values = [
                            getattr(fixed, field) for field in self.force_merge_fields
                        ] + [fixed.id]
                        c.execute(
                            """
                            UPDATE features SET %s
                            WHERE id = ?
                            """
                            % _set_clause,
                            values,
                        )
                elif final_strategy == "replace":
                    self._replace(f, c)
                elif final_strategy == "create_unique":
                    self._insert(f, c)
            # For an on-spec GTF file,
            # self.transcript_key = "transcript_id"
            # self.gene_key = "gene_id"
            relations = []
            parent = None
            grandparent = None
            if (
                self.transcript_key in f.attributes
                and f.attributes[self.transcript_key]
            ):
                parent = f.attributes[self.transcript_key][0]
                relations.append((parent, f.id, 1))
            if self.gene_key in f.attributes:
                grandparent = f.attributes[self.gene_key]
                if len(grandparent) > 0:
                    grandparent = grandparent[0]
                    relations.append((grandparent, f.id, 2))
                    if parent is not None:
                        relations.append((grandparent, parent, 1))
            # Note the IGNORE, so relationships defined many times in the file
            # (e.g., the transcript-gene relation on pretty much every line in
            # a GTF) will only be included once.
            c.executemany(
                """
                INSERT OR IGNORE INTO relations (parent, child, level)
                VALUES (?, ?, ?)
                """,
                relations,
            )
        if lines_seen == 0:
            # NOTE(review): the GFF counterpart raises EmptyInputError here;
            # confirm whether this ValueError should be made consistent.
            raise ValueError("No lines parsed -- was an empty file provided?")
        logger.info("Committing changes")
        self.conn.commit()
        if self.verbose:
            logger.info(msg % i)

    def _update_relations(self):
        """
        Infer gene and/or transcript extents from subfeatures (e.g. exons)
        and insert them as derived features, unless disabled.
        """
        if self.disable_infer_genes and self.disable_infer_transcripts:
            return
        # TODO: do any indexes speed this up?
        c = self.conn.cursor()
        c2 = self.conn.cursor()
        logger.info("Creating relations(parent) index")
        c.execute("DROP INDEX IF EXISTS relationsparent")
        c.execute("CREATE INDEX relationsparent ON relations (parent)")
        logger.info("Creating relations(child) index")
        c.execute("DROP INDEX IF EXISTS relationschild")
        c.execute("CREATE INDEX relationschild ON relations (child)")
        # At least one of the two inference modes is enabled at this point,
        # so exactly one branch below sets `msg`.
        if not (self.disable_infer_genes or self.disable_infer_transcripts):
            msg = "gene and transcript"
        elif self.disable_infer_transcripts:
            msg = "gene"
        elif self.disable_infer_genes:
            msg = "transcript"
        logger.info("Inferring %s extents " "and writing to tempfile" % msg)
        # A string value of _keep_tempfiles doubles as the tempfile suffix.
        if isinstance(self._keep_tempfiles, six.string_types):
            suffix = self._keep_tempfiles
        else:
            suffix = ".gffutils"
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix).name
        with open(tmp, "w") as fout:
            self._tmpfile = tmp
            # This takes some explanation...
            #
            # First, the nested subquery gets the level-1 parents of
            # self.subfeature featuretypes. For an on-spec GTF file,
            # self.subfeature = "exon". So this subquery translates to getting the
            # distinct level-1 parents of exons -- which are transcripts.
            #
            # OK, so this first subquery is now a list of transcripts; call it
            # "firstlevel".
            #
            # Then join firstlevel on relations, but the trick is to now consider
            # each transcript a *child* -- so that relations.parent (on the first
            # line of the query) will be the first-level parent of the transcript
            # (the gene).
            #
            #
            # The result is something like:
            #
            #   transcript1 gene1
            #   transcript2 gene1
            #   transcript3 gene2
            #
            # Note that genes are repeated; below we need to ensure that only one
            # is added. To ensure this, the results are ordered by the gene ID.
            #
            # By the way, we do this even if we're only looking for transcripts or
            # only looking for genes.
            c.execute(
                """
                SELECT DISTINCT firstlevel.parent, relations.parent
                FROM (
                    SELECT DISTINCT parent
                    FROM relations
                    JOIN features ON features.id = relations.child
                    WHERE features.featuretype = ?
                    AND relations.level = 1
                )
                AS firstlevel
                JOIN relations ON firstlevel.parent = child
                WHERE relations.level = 1
                ORDER BY relations.parent
                """,
                (self.subfeature,),
            )
            # Now we iterate through those results (using a new cursor) to infer
            # the extent of transcripts and/or genes.
            last_gene_id = None
            n_features = 0
            for transcript_id, gene_id in c:
                if not self.disable_infer_transcripts:
                    # transcript extent: min start / max end over subfeatures
                    c2.execute(
                        """
                        SELECT MIN(start), MAX(end), strand, seqid
                        FROM features
                        JOIN relations ON
                        features.id = relations.child
                        WHERE parent = ? AND featuretype == ?
                        """,
                        (transcript_id, self.subfeature),
                    )
                    transcript_start, transcript_end, strand, seqid = c2.fetchone()
                    transcript_attributes = {
                        self.transcript_key: [transcript_id],
                        self.gene_key: [gene_id],
                    }
                    transcript_bin = bins.bins(
                        transcript_start, transcript_end, one=True
                    )
                    # Write out to file; we'll be reading it back in shortly. Omit
                    # score, frame, source, and extra since they will always have
                    # the same default values (".", ".", "gffutils_derived", and []
                    # respectively)
                    fout.write(
                        "\t".join(
                            map(
                                str,
                                [
                                    transcript_id,
                                    seqid,
                                    transcript_start,
                                    transcript_end,
                                    strand,
                                    "transcript",
                                    transcript_bin,
                                    helpers._jsonify(transcript_attributes),
                                ],
                            )
                        )
                        + "\n"
                    )
                    n_features += 1
                if not self.disable_infer_genes:
                    # Infer gene extent, but only if we haven't done so already
                    # for this gene; recall we sorted by gene id so this check
                    # works
                    if gene_id != last_gene_id:
                        c2.execute(
                            """
                            SELECT MIN(start), MAX(end), strand, seqid
                            FROM features
                            JOIN relations ON
                            features.id = relations.child
                            WHERE parent = ? AND featuretype == ?
                            """,
                            (gene_id, self.subfeature),
                        )
                        gene_start, gene_end, strand, seqid = c2.fetchone()
                        gene_attributes = {self.gene_key: [gene_id]}
                        gene_bin = bins.bins(gene_start, gene_end, one=True)
                        fout.write(
                            "\t".join(
                                map(
                                    str,
                                    [
                                        gene_id,
                                        seqid,
                                        gene_start,
                                        gene_end,
                                        strand,
                                        "gene",
                                        gene_bin,
                                        helpers._jsonify(gene_attributes),
                                    ],
                                )
                            )
                            + "\n"
                        )
                    last_gene_id = gene_id
                    n_features += 1

        def derived_feature_generator():
            """
            Generator of items from the file that was just created...
            """
            keys = [
                "parent",
                "seqid",
                "start",
                "end",
                "strand",
                "featuretype",
                "bin",
                "attributes",
            ]
            with open(fout.name) as fin:
                for line in fin:
                    d = dict(list(zip(keys, line.strip().split("\t"))))
                    d.pop("parent")
                    # Restore the fields omitted when the line was written out.
                    d["score"] = "."
                    d["source"] = "gffutils_derived"
                    d["frame"] = "."
                    d["extra"] = []
                    d["attributes"] = helpers._unjsonify(d["attributes"])
                    f = feature.Feature(**d)
                    f.id = self._id_handler(f)
                    yield f

        # Drop the indexes so the inserts are faster
        c.execute("DROP INDEX IF EXISTS relationsparent")
        c.execute("DROP INDEX IF EXISTS relationschild")
        # Insert the just-inferred transcripts and genes. TODO: should we
        # *always* use "merge" here for the merge_strategy?
        logger.info("Importing inferred features into db")
        last_perc = None
        for i, f in enumerate(derived_feature_generator()):
            perc = int(i / float(n_features) * 100)
            if perc != last_perc:
                sys.stderr.write("%s of %s (%s%%)\r" % (i, n_features, perc))
                sys.stderr.flush()
            last_perc = perc
            try:
                self._insert(f, c)
            except sqlite3.IntegrityError:
                # Derived features always merge attributes on collision.
                fixed, final_strategy = self._do_merge(f, "merge")
                c.execute(
                    """
                    UPDATE features SET attributes = ?
                    WHERE id = ?
                    """,
                    (helpers._jsonify(fixed.attributes), fixed.id),
                )
        logger.info("Committing changes")
        self.conn.commit()
        if not self._keep_tempfiles:
            os.unlink(fout.name)
        # TODO: recreate indexes? Typically the _finalize() method will be
        # called after this one, and indexes are created in _finalize().
def create_db(
data,
dbfn,
id_spec=None,
force=False,
verbose=False,
checklines=10,
merge_strategy="error",
transform=None,
gtf_transcript_key="transcript_id",
gtf_gene_key="gene_id",
gtf_subfeature="exon",
force_gff=False,
force_dialect_check=False,
from_string=False,
keep_order=False,
text_factory=str,
force_merge_fields=None,
pragmas=constants.default_pragmas,
sort_attribute_values=False,
dialect=None,
_keep_tempfiles=False,
infer_gene_extent=True,
disable_infer_genes=False,
disable_infer_transcripts=False,
**kwargs
):
"""
Create a database from a GFF or GTF file.
For more details on when and how to use the kwargs below, see the examples
in the online documentation (:ref:`examples`).
Parameters
----------
data : string or iterable
If a string (and `from_string` is False), then `data` is the path to
the original GFF or GTF file.
If a string and `from_string` is True, then assume `data` is the actual
data to use.
Otherwise, it's an iterable of Feature objects.
dbfn : string
Path to the database that will be created. Can be the special string
":memory:" to create an in-memory database.
id_spec : string, list, dict, callable, or None
This parameter guides what will be used as the primary key for the
database, which in turn determines how you will access individual
features by name from the database.
If an id spec is not otherwise specified for a featuretype (keep
reading below for how to do this), or the provided id spec is not
available for a particular feature (say, exons do not have "ID"
attributes even though `id_spec="ID"` was provided) then the default
behavior is to autoincrement an ID for that featuretype. For example,
if there is no id spec defined for an exon, then the ids for exons will
take the form exon1, exon2, exon3, and so on. This ensures that each
feature has a unique primary key in the database without requiring lots
of configuration. However, if you want to be able to retrieve features
based on their primary key, then it is worth the effort to provide an
accurate id spec.
If `id_spec=None`, then use the default behavior. The default behavior
depends on the detected format (or forced format, e.g., if
`force_gff=True`). For GFF files, the default is be `id_spec="ID"`. For
GTF files, the default is `id_spec={'gene': 'gene_id', 'transcript':
'transcript_id'}`.
If `id_spec` is a string, then look for this key in the attributes. If
it exists, then use its value as the primary key, otherwise
autoincrement based on the feature type. For many GFF3 files, "ID"
usually works well.
If `id_spec` is a list or tuple of keys, then check for each one in
order, using the first one found. For GFF3, this might be modified to
["ID", "Name"], which would use the ID if it exists, otherwise the
Name, otherwise autoincrement based on the feature type.
If `id_spec` is a dictionary, then it is a mapping of feature types to
what should be used as the ID. For example, for GTF files, `{'gene':
'gene_id', 'transcript': 'transcript_id'}` may be useful. The values
of this dictionary can also be a list, e.g., `{'gene': ['gene_id',
'geneID']}`.
If `id_spec` is a callable object, then it accepts a dictionary from
the iterator and returns one of the following:
* None (in which case the feature type will be auto-incremented)
* string (which will be used as the primary key)
* special string starting with "autoincrement:X", where "X" is
a string that will be used for auto-incrementing. For example,
if "autoincrement:chr10", then the first feature will be
"chr10_1", the second "chr10_2", and so on.
force : bool
If `False` (default), then raise an exception if `dbfn` already exists.
Use `force=True` to overwrite any existing databases.
verbose : bool
Report percent complete and other feedback on how the db creation is
progressing.
In order to report percent complete, the entire file needs to be read
once to see how many items there are; for large files you may want to
use `verbose=False` to avoid this.
checklines : int
Number of lines to check the dialect.
merge_strategy : str
One of {merge, create_unique, error, warning, replace}.
This parameter specifies the behavior when two items have an identical
primary key.
Using `merge_strategy="merge"`, then there will be a single entry in
the database, but the attributes of all features with the same primary
key will be merged. WARNING: this can be quite slow when incorrectly
used.
Using `merge_strategy="create_unique"`, then the first entry will use
the original primary key, but the second entry will have a unique,
autoincremented primary key assigned to it
Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID`
exception will be raised. This means you will have to edit the file
yourself to fix the duplicated IDs.
Using `merge_strategy="warning"`, a warning will be printed to the
logger, and the duplicate feature will be skipped.
Using `merge_strategy="replace"` will replace the entire existing
feature with the new feature.
transform : callable
If not None, `transform` should accept a Feature object as its only
argument and return either a (possibly modified) Feature object or
a value that evaluates to False. If the return value is False, the
feature will be skipped.
gtf_transcript_key, gtf_gene_key : string
Which attribute to use as the transcript ID and gene ID respectively
for GTF files. Default is `transcript_id` and `gene_id` according to
the GTF spec.
gtf_subfeature : string
Feature type to use as a "gene component" when inferring gene and
transcript extents for GTF files. Default is `exon` according to the
GTF spec.
force_gff : bool
If True, do not do automatic format detection -- only use GFF.
force_dialect_check : bool
If True, the dialect will be checkef for every feature (instead of just
`checklines` features). This can be slow, but may be necessary for
inconsistently-formatted input files.
from_string : bool
If True, then treat `data` as actual data (rather than the path to
a file).
keep_order : bool
If True, all features returned from this instance will have the
order of their attributes maintained. This can be turned on or off
database-wide by setting the `keep_order` attribute or with this
kwarg, or on a feature-by-feature basis by setting the `keep_order`
attribute of an individual feature.
Note that a single order of attributes will be used for all features.
Specifically, the order will be determined by the order of attribute
keys in the first `checklines` of the input data. See
helpers._choose_dialect for more information on this.
Default is False, since this includes a sorting step that can get
time-consuming for many features.
infer_gene_extent : bool
DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and
`disable_infer_genes` for more granular control.
disable_infer_transcripts, disable_infer_genes : bool
Only used for GTF files. By default -- and according to the GTF spec --
we assume that there are no transcript or gene features in the file.
gffutils then infers the extent of each transcript based on its
constituent exons and infers the extent of each gene bases on its
constituent transcripts.
This default behavior is problematic if the input file already contains
transcript or gene features (like recent GENCODE GTF files for human),
since 1) the work to infer extents is unnecessary, and 2)
trying to insert an inferred feature back into the database triggers
gffutils' feature-merging routines, which can get time consuming.
The solution is to use `disable_infer_transcripts=True` if your GTF
already has transcripts in it, and/or `disable_infer_genes=True` if it
already has genes in it. This can result in dramatic (100x) speedup.
Prior to version 0.8.4, setting `infer_gene_extents=False` would
disable both transcript and gene inference simultaneously. As of
version 0.8.4, these argument allow more granular control.
force_merge_fields : list
If merge_strategy="merge", then features will only be merged if their
non-attribute values are identical (same chrom, source, start, stop,
score, strand, phase). Using `force_merge_fields`, you can override
this behavior to allow merges even when fields are different. This
list can contain one or more of ['seqid', 'source', 'featuretype',
'score', 'strand', 'frame']. The resulting merged fields will be
strings of comma-separated values. Note that 'start' and 'end' are not
available, since these fields need to be integers.
text_factory : callable
Text factory to use for the sqlite3 database.
pragmas : dict
Dictionary of pragmas used when creating the sqlite3 database. See
http://www.sqlite.org/pragma.html for a list of available pragmas. The
defaults are stored in constants.default_pragmas, which can be used as
a template for supplying a custom dictionary.
sort_attribute_values : bool
All features returned from the database will have their attribute
values sorted. Typically this is only useful for testing, since this
can get time-consuming for large numbers of features.
_keep_tempfiles : bool or string
False by default to clean up intermediate tempfiles created during GTF
import. If True, then keep these tempfile for testing or debugging.
If string, then keep the tempfile for testing, but also use the string
as the suffix fo the tempfile. This can be useful for testing in
parallel environments.
Returns
-------
New :class:`FeatureDB` object.
"""
_locals = locals()
# Check if any older kwargs made it in
deprecation_handler(kwargs)
kwargs = dict((i, _locals[i]) for i in constants._iterator_kwargs)
# First construct an iterator so that we can identify the file format.
# DataIterator figures out what kind of data was provided (string of lines,
# filename, or iterable of Features) and checks `checklines` lines to
# identify the dialect.
iterator = iterators.DataIterator(**kwargs)
kwargs.update(**_locals)
if dialect is None:
dialect = iterator.dialect
# However, a side-effect of this is that if `data` was a generator, then
# we've just consumed `checklines` items (see
# iterators.BaseIterator.__init__, which calls iterators.peek).
#
# But it also chains those consumed items back onto the beginning, and the
# result is available as as iterator._iter.
#
# That's what we should be using now for `data:
kwargs["data"] = iterator
kwargs["directives"] = iterator.directives
# Since we've already checked lines, we don't want to do it again
kwargs["checklines"] = 0
if force_gff or (dialect["fmt"] == "gff3"):
cls = _GFFDBCreator
id_spec = id_spec or "ID"
add_kwargs = dict(
id_spec=id_spec,
)
elif dialect["fmt"] == "gtf":
cls = _GTFDBCreator
id_spec = id_spec or {"gene": "gene_id", "transcript": "transcript_id"}
add_kwargs = dict(
transcript_key=gtf_transcript_key,
gene_key=gtf_gene_key,
subfeature=gtf_subfeature,
id_spec=id_spec,
)
kwargs.update(**add_kwargs)
kwargs["dialect"] = dialect
c = cls(**kwargs)
c.create()
if dbfn == ":memory:":
db = interface.FeatureDB(
c.conn,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory,
)
else:
db = interface.FeatureDB(
c,
keep_order=keep_order,
pragmas=pragmas,
sort_attribute_values=sort_attribute_values,
text_factory=text_factory,
)
return db
| mit | a616f8c4a30faa43df34c53c750ee909 | 37.300916 | 88 | 0.5346 | 4.664349 | false | false | false | false |
conjure-up/conjure-up | test/test_utils.py | 3 | 3454 | #!/usr/bin/env python
#
# tests test/utils.py
#
# Copyright Canonical, Ltd.
import asyncio
import logging
import unittest
from unittest.mock import patch
from conjureup import utils
from .helpers import test_loop
class UtilsTestCase(unittest.TestCase):
    """Unit tests for helpers in conjureup.utils."""

    def test_valid_hostnames(self):
        "Verify is_valid_hostname"
        # All of these should be accepted, including leading underscores
        # and double dashes, which are unusual but valid here.
        hostnames = [
            'battlemidget.lol',
            'www.battlemidget.lol',
            'localhost',
            'localhost.localdomain',
            '_underscores-is_ok',
            'double--dash_is_ok'
        ]
        for hostname in hostnames:
            assert utils.is_valid_hostname(hostname)

    def test_invalid_hostnames(self):
        "Verify is_valid_hostname detects invalid hostnames"
        # Leading/trailing dashes and a leading dot must be rejected.
        hostnames = [
            '-no-starting-dash.com',
            '.localhost',
            'localhost.no-end-dash-'
        ]
        for hostname in hostnames:
            assert not utils.is_valid_hostname(hostname)

    @patch.object(utils, 'juju_version')
    @patch.object(utils, 'app')
    def test_sentry_report(self, app, juju_version):
        """sentry_report schedules _sentry_report, which honors no_report
        and forwards message/exception payloads to app.sentry.capture."""
        # test task schedule
        flag = asyncio.Event()
        with patch.object(utils, '_sentry_report',
                          lambda *a, **kw: flag.set()):
            with test_loop() as loop:
                app.loop = loop
                utils.sentry_report('m')
                loop.run_until_complete(asyncio.wait_for(flag.wait(), 30))
        # test implementation
        app.config = {'spell': 'spell'}
        app.provider.cloud_type = 'type'
        app.provider.region = 'region'
        app.is_jaas = False
        app.headless = False
        juju_version.return_value = '2.j'
        # With no_report set, nothing must be sent to sentry.
        app.no_report = True
        utils._sentry_report('message', tags={'foo': 'bar'})
        assert not app.sentry.capture.called
        app.no_report = False
        utils._sentry_report('message', tags={'foo': 'bar'})
        app.sentry.capture.assert_called_once_with(
            'raven.events.Message',
            message='message',
            level=logging.WARNING,
            tags={
                'spell': 'spell',
                'cloud_type': 'type',
                'region': 'region',
                'jaas': False,
                'headless': False,
                'juju_version': '2.j',
                'foo': 'bar'
            })
        app.sentry.capture.reset_mock()
        # Passing exc_info switches the event type to an Exception report.
        utils._sentry_report('message', 'exc_info')
        app.sentry.capture.assert_called_once_with(
            'raven.events.Exception',
            level=logging.ERROR,
            exc_info='exc_info',
            tags={
                'spell': 'spell',
                'cloud_type': 'type',
                'region': 'region',
                'jaas': False,
                'headless': False,
                'juju_version': '2.j'
            })

    def test_subtract_dicts(self):
        """subtract_dicts removes keys/sub-keys without mutating input."""
        d = {
            'foo': {
                'bar': 1,
                'baz': 2,
            },
            'qux': [1, 2],
        }
        d_orig = utils.merge_dicts(d, {})  # make deep copy
        # full key delete
        self.assertEqual(utils.subtract_dicts(d, {'foo': None}),
                         {'qux': [1, 2]})
        # ensure no side-effects
        self.assertEqual(d, d_orig)
        # sub-key delete
        self.assertEqual(utils.subtract_dicts(d, {'foo': {'baz': None}}),
                         {'foo': {'bar': 1}, 'qux': [1, 2]})
| mit | 29fa1a41b14c42d41baca03e75c3ed53 | 28.775862 | 74 | 0.502027 | 3.956472 | false | true | false | false |
conjure-up/conjure-up | conjureup/ui/views/jaas.py | 3 | 1827 | from ubuntui.utils import Color, Padding
from urwid import Columns, Edit, Text
from conjureup.ui.views.base import BaseView
class JaaSLoginView(BaseView):
    """Credential-entry form for logging in to JaaS via Ubuntu SSO."""

    title = 'Login to JaaS'
    subtitle = 'Enter your Ubuntu SSO credentials'
    footer = ('Enter your Ubuntu SSO (Launchpad) email address and '
              'password. If you have Two-Factor Authentication (2FA) '
              'enabled, enter that as well, otherwise leave that field '
              'blank. For more information about 2FA, see: '
              'https://help.ubuntu.com/community/SSO/FAQs/2FA')

    def __init__(self, submit_cb, back_cb, error=None):
        """Store callbacks, an optional error message, and build the
        three input widgets (email, masked password, 2FA code)."""
        self.submit_cb = submit_cb
        self.prev_screen = back_cb
        self.error = error
        self.email = Edit()
        self.password = Edit(mask='*')
        self.twofa = Edit()
        # Restrict 2FA input to digit characters.  IntEdit is unsuitable
        # because it precludes leading zeros, which are significant in
        # one-time codes.
        self.twofa.valid_char = lambda ch: ch in '0123456789'
        super().__init__()

    def build_widget(self):
        """Return the form rows: one labelled input per field, plus an
        error line when a previous attempt failed."""
        labelled_fields = [
            ('Email:', self.email),
            ('Password:', self.password),
            ('Two-Factor Auth (2FA):', self.twofa),
        ]
        rows = []
        for label, field in labelled_fields:
            rows.append(
                Columns([('fixed', 23, Text(label)),
                         Color.string_input(field,
                                            focus_map='string_input focus')]))
            rows.append(Padding.line_break(""))
        if self.error:
            rows.append(Color.error_major(Text(" {}".format(self.error))))
        return rows

    def build_buttons(self):
        """Single LOGIN button wired to submit()."""
        return [self.button('LOGIN', self.submit)]

    def submit(self):
        """Pass the entered credentials to the submit callback."""
        self.submit_cb(self.email.edit_text,
                       self.password.edit_text,
                       self.twofa.edit_text)
| mit | 19fbec2726320c3ac5ef69f64b4be3e2 | 33.471698 | 78 | 0.55884 | 3.946004 | false | false | false | false |
conjure-up/conjure-up | conjureup/ui/views/shutdown.py | 3 | 1640 | """ A View to inform user that shutdown is in progress.
"""
from ubuntui.utils import Padding
from urwid import Columns, LineBox, Pile, Text, WidgetWrap
from conjureup import events
from conjureup.app_config import app
from conjureup.ui.widgets.buttons import SubmitButton
class ShutdownView(WidgetWrap):
    """Modal dialog asking the user to confirm quitting conjure-up."""

    def __init__(self, exit_code):
        self.exit_code = exit_code
        self.message = Text('Do you want to quit?', align='center')
        button_row = Columns([
            Text(""),
            SubmitButton('Yes', lambda _: self.confirm()),
            Text(""),
            SubmitButton('No', lambda _: self.cancel()),
            Text(""),
        ])
        super().__init__(LineBox(Pile([
            Padding.line_break(""),
            self.message,
            Padding.line_break(""),
            button_row,
            Padding.line_break(""),
        ])))
        # If an error is already being reported, don't prompt -- go
        # straight into shutdown.
        if events.Error.is_set():
            self.confirm()

    def confirm(self):
        """Begin shutdown: replace the prompt with a wait message and
        fire the Shutdown event with this view's exit code."""
        self.message.set_text("Conjure-up is shutting down, please wait.")
        del self._w.base_widget.contents[-2:]  # remove buttons
        events.Shutdown.set(self.exit_code)

    def cancel(self):
        """Dismiss the dialog and return to the application."""
        app.ui.hide_shutdown_dialog()

    def keypress(self, size, key):
        """Handle tab to toggle button focus plus y/n shortcuts."""
        if key == 'tab':
            moved = super().keypress(size, 'right')
            if moved == 'right':
                # already on the rightmost button; wrap back left
                moved = super().keypress(size, 'left')
            return moved
        lowered = key.lower()
        if lowered == 'y':
            self.confirm()
            return None
        if lowered == 'n':
            self.cancel()
            return None
        return super().keypress(size, key)
| mit | bde7539de36a75a1eeea6546eb665dfd | 29.943396 | 74 | 0.539634 | 4.039409 | false | false | false | false |
conjure-up/conjure-up | conjureup/controllers/juju/jaaslogin/gui.py | 3 | 4361 | import asyncio
from subprocess import CalledProcessError
from conjureup import controllers, juju
from conjureup.app_config import app
from conjureup.consts import JAAS_DOMAIN
from conjureup.ui.views.interstitial import InterstitialView
from conjureup.ui.views.jaas import JaaSLoginView
class JaaSLoginController:
    """Register the JaaS controller with Juju.

    Tries a cached-token registration first; on failure falls back to an
    interactive email/password/2FA login form.
    """

    def __init__(self):
        # Set while a login attempt is in flight; drives the wait screen.
        self.authenticating = asyncio.Event()
        self.login_success = False
        # None: no attempt yet.  '': cached-token auth failed (skip
        # retrying it, show the form with no error text).  Any other
        # string: error message to display on the login form.
        self.login_error = None

    def render(self, going_back=False):
        """Entry point from the controller framework."""
        if going_back:
            self.back()  # if going back, skip this screen entirely
            return
        if not app.jaas_controller:
            app.jaas_controller = 'jaas'
        if self.login_success:
            # already authed, don't waste time trying again
            self.finish()
        elif self.login_error is not None:
            # saved error, go straight to form
            self.render_login()
        else:
            # try to auth with cached creds
            self.render_interstitial()
            app.loop.create_task(self._try_token_auth())

    def render_interstitial(self):
        """Show a 'please wait' screen tied to the authenticating event."""
        self.authenticating.set()
        view = InterstitialView(title="JaaS Login Wait",
                                message="Logging in to JaaS. Please wait.",
                                event=self.authenticating)
        view.show()

    def render_login(self):
        """Show the interactive login form (with any saved error)."""
        view = JaaSLoginView(self.register, self.back, self.login_error)
        view.show()

    async def _try_token_auth(self):
        """Attempt registration using a previously saved token."""
        app.log.info('Attempting to register JAAS with saved token')
        try:
            await juju.register_controller(app.jaas_controller,
                                           JAAS_DOMAIN,
                                           '', '', '')
            app.provider.controller = app.jaas_controller
            app.is_jaas = True
            controllers.use('showsteps').render()
        except CalledProcessError:
            # empty-but-not-None message to skip retrying token auth
            self.login_error = ''
            self.render_login()

    def register(self, email=None, password=None, twofa=None):
        """Submit-callback from the login form."""
        self.render_interstitial()
        app.loop.create_task(self._register(email, password, twofa))

    async def _register(self, email, password, twofa):
        """Register with explicit credentials; fail/timeout callbacks
        handle the error paths."""
        if not await juju.register_controller(app.jaas_controller,
                                              JAAS_DOMAIN,
                                              email, password, twofa,
                                              fail_cb=self.fail,
                                              timeout_cb=self.timeout):
            return
        app.provider.controller = app.jaas_controller
        self.authenticating.clear()
        self.login_success = True
        self.login_error = None
        app.log.info('JAAS is registered')
        self.finish()

    def finish(self):
        """Mark JaaS active and continue to the next screen."""
        app.is_jaas = True
        controllers.use('showsteps').render()

    def fail(self, stderr):
        """Translate juju's stderr into a user-facing login error and
        re-show the login form."""
        self.authenticating.clear()
        msg = stderr
        if 'ERROR cannot get user details for' in msg:
            msg = ('USSO account not connected with JaaS. Please login via '
                   'your browser at https://jujucharms.com/login to connect '
                   'your account, and then try this login again.')
        else:
            prefix = 'ERROR cannot get token: '
            if msg.startswith(prefix):
                msg = msg[len(prefix):]
            prefix = 'Invalid request data (email: ['
            if msg.startswith(prefix):
                msg = msg[len(prefix):][:-3]  # also strip trailing ])
            msg = 'Login failed, please try again: {}'.format(msg)
        self.login_error = msg
        self.render_login()

    def timeout(self):
        """Registration timed out; check whether it actually succeeded
        before reporting an error."""
        self.authenticating.clear()
        # FIX: previously this local was named `controllers`, shadowing
        # the module-level conjureup.controllers import within this method.
        existing = juju.get_controllers()
        if app.jaas_controller in existing['controllers']:
            # registration seems to have worked; maybe we should remove and
            # try again to be safe, but hopefully it's safe to just move on
            return self.finish()
        self.login_error = 'Timed out connecting to JaaS. Please try again.'
        self.render_login()

    def back(self):
        """Return to the controller picker screen."""
        controllers.use('controllerpicker').render(going_back=True)


_controller_class = JaaSLoginController
| mit | c00aab82477a0598a26379f83296f43e | 36.921739 | 77 | 0.573034 | 4.292323 | false | false | false | false |
conjure-up/conjure-up | conjureup/ui/views/destroy_confirm.py | 3 | 3999 | import datetime
from ubuntui.utils import Color, Padding
from ubuntui.widgets.buttons import menu_btn
from ubuntui.widgets.hr import HR
from ubuntui.widgets.text import Instruction
from urwid import Columns, Filler, Frame, Pile, Text, WidgetWrap
class DestroyConfirmView(WidgetWrap):
    """Confirmation screen shown before destroying a Juju model.

    Displays a summary of the model (name, cloud, status, applications,
    machine count) with YES/NO buttons in the footer.  The callback `cb`
    receives (controller, model_name) on confirm, or (None, None) on
    cancel.
    """

    def __init__(self, app, controller, model, cb):
        self.app = app
        self.cb = cb
        self.controller = controller
        self.model = model
        self.config = self.app.config
        # Tracks whether keyboard focus is on the footer button row.
        self.buttons_pile_selected = False
        self.frame = Frame(body=self._build_widget(),
                           footer=self._build_footer())
        # Start with focus on the footer (the NO/YES buttons).
        self.frame.focus_position = 'footer'
        self.buttons.focus_position = 1
        super().__init__(self.frame)

    def keypress(self, size, key):
        # Tab / shift-tab toggle focus between the body and the buttons.
        if key in ['tab', 'shift tab']:
            self._swap_focus()
        return super().keypress(size, key)

    def _swap_focus(self):
        """Toggle focus between the model summary and the footer buttons."""
        if not self.buttons_pile_selected:
            self.buttons_pile_selected = True
            self.frame.focus_position = 'footer'
            self.buttons.focus_position = 1
        else:
            self.buttons_pile_selected = False
            self.frame.focus_position = 'body'

    def _build_footer(self):
        """Build the NO/YES button row and wrap it in the footer pile."""
        no = menu_btn(on_press=self.cancel,
                      label="\n NO\n")
        yes = menu_btn(on_press=self.submit,
                       label="\n YES\n")
        self.buttons = Columns([
            ('fixed', 2, Text("")),
            ('fixed', 11, Color.menu_button(
                no,
                focus_map='button_primary focus')),
            Text(""),
            ('fixed', 11, Color.menu_button(
                yes,
                focus_map='button_primary focus')),
            ('fixed', 2, Text(""))
        ])
        self.footer = Pile([
            Padding.line_break(""),
            self.buttons
        ])
        return Color.frame_footer(self.footer)

    def _sanitize_date(self, date_obj):
        """ Some cases juju uses human readable date/time like X secs ago and models
        that run longer get a typical datetime.date object, need to make sure
        of which one we're dealing with

        Arguments:
        date_obj: datetime.date object

        Returns:
        String representation of date or the Juju human readable string
        if applicable
        """
        if isinstance(date_obj, datetime.date):
            return date_obj.strftime('%Y-%m-%d')
        else:
            # Juju already gave us a human-readable string; pass through.
            return str(date_obj)

    def _total_machines(self, model):
        """ Returns total machines in model
        """
        machines = model.get('machines', None)
        if machines is None:
            return 0
        return len(machines.keys())

    def _build_widget(self):
        """Build the model-summary table shown in the body."""
        applications = self.app.juju.client.applications
        total_items = [Instruction("Deployment Information:"), HR()]
        tbl = Pile([
            Columns([('fixed', 15, Text("Name")),
                     Text(self.model['name'])]),
            Columns([('fixed', 15, Text("Cloud")),
                     Text(self.model['cloud'])]),
            Columns([('fixed', 15, Text("Status")),
                     Text(self.model['status']['current'])]),
            Columns([('fixed', 15, Text("Online")),
                     Text(self._sanitize_date(
                         self.model['status']['since']))]),
            Columns([('fixed', 15, Text("Applications")),
                     Text(", ".join(applications.keys()))]),
            Columns([('fixed', 15, Text("Machines")),
                     Text(str(self._total_machines(self.model)))])
        ])
        total_items.append(tbl)
        total_items.append(HR())
        return Padding.center_80(Filler(Pile(total_items), valign='top'))

    def submit(self, btn):
        # Blank out the button row so the action can't be re-triggered,
        # then hand control back with the model to destroy.
        self.footer.contents[-1] = (Text(""), self.footer.options())
        self.cb(self.controller, self.model['name'])

    def cancel(self, btn):
        # Signal "no action" to the caller.
        self.cb(None, None)
| mit | dbe2a05e0372c74a47981ee45cd7e46e | 33.179487 | 84 | 0.540635 | 4.139752 | false | false | false | false |
conjure-up/conjure-up | conjureup/ui/views/spellpicker.py | 3 | 2161 | from ubuntui.ev import EventLoop
from ubuntui.utils import Color
from urwid import Text
from conjureup.ui.views.base import BaseView
from conjureup.ui.views.bundle_readme_view import BundleReadmeView
from conjureup.ui.widgets.selectors import MenuSelectButtonList
class SpellPickerView(BaseView):
    """Menu of recommended spells grouped by category, with an inline
    README viewer bound to the 'r' key."""

    title = "Spell Selection"
    subtitle = "Choose from this list of recommended spells"
    show_back_button = False

    def __init__(self, app, spells, cb):
        self.app = app
        self.cb = cb
        self.spells = spells
        self.config = self.app.config
        super().__init__()
        self.extend_command_map({
            'r': self.show_readme,
        })
        self.update_spell_description()

    def show_readme(self):
        """Swap the body for the selected spell's bundle README."""
        _, rows = EventLoop.screen_size()
        spell = self.selected_spell
        readme_view = BundleReadmeView(spell['name'],
                                       spell['spell-dir'],
                                       self.hide_readme,
                                       int(rows * .75))
        self.app.ui.set_header("Spell Readme")
        self.app.ui.set_body(readme_view)

    def hide_readme(self):
        """Return from the README back to the picker."""
        self.show()

    @property
    def selected_spell(self):
        """Spell dict currently highlighted in the menu (or None)."""
        return self.widget.selected

    def update_spell_description(self):
        """Mirror the highlighted spell's description in the footer."""
        spell = self.selected_spell
        if spell:
            self.set_footer(spell['description'])
        else:
            self.set_footer("No spell selected")

    def after_keypress(self):
        # Keep the footer description in sync as the highlight moves.
        self.update_spell_description()

    def build_widget(self):
        """Build the category-grouped list of selectable spells."""
        listing = MenuSelectButtonList()
        current_category = None
        for category, spell in self.spells:
            if category == "_unassigned_spells":
                category = "other"
            if category != current_category:
                # Blank separator line between successive categories.
                if current_category:
                    listing.append(Text(""))
                listing.append(Color.label(Text(category)))
                current_category = category
            listing.append_option(spell['name'], spell)
        listing.focus_position = 1
        return listing

    def next_screen(self):
        """Hand the chosen spell's key to the continuation callback."""
        self.cb(self.selected_spell['key'])
| mit | 395e586c4e00c6d46135a6b4fd85f48d | 29.871429 | 66 | 0.586765 | 3.893694 | false | false | false | false |
conjure-up/conjure-up | conjureup/events.py | 3 | 6590 | import asyncio
import errno
import inspect
from concurrent.futures import CancelledError
from pathlib import Path
from ubuntui.ev import EventLoop
from urwid import ExitMainLoop
from conjureup import errors, utils
from conjureup.app_config import app
from conjureup.telemetry import track_exception
class Event(asyncio.Event):
    """asyncio.Event subclass that logs every set/clear/wait with the
    source location of the caller and (when available) the awaiting task.
    """

    def __init__(self, name):
        # Human-readable name used in all log lines for this event.
        self._name = name
        super().__init__()

    def _log(self, action):
        """Log `action` (e.g. 'Setting') with caller file:line and task info.

        Uses stack introspection: frame [2] is normally the caller of the
        wrapping set/clear/wait method.
        """
        base_path = Path(__file__).parent.parent
        frame = inspect.stack()[2]
        event_methods = ('set', 'clear', 'wait')
        if frame.filename == __file__ and frame.function in event_methods:
            # NamedEvent wraps these methods and we want the original
            # caller, so we need to jump up an extra stack frame
            frame = inspect.stack()[3]
        # NOTE(review): asyncio.Task.current_task() is deprecated in newer
        # Python versions in favor of asyncio.current_task() -- confirm
        # supported interpreter range before changing.
        task = getattr(asyncio.Task.current_task(), '_coro', '')
        try:
            # Prefer paths relative to the package root for brevity.
            frame_file = Path(frame.filename).relative_to(base_path)
        except ValueError:
            frame_file = Path(frame.filename)
        frame_lineno = frame.lineno
        if task:
            code = task.cr_frame.f_code
            task_name = code.co_name
            try:
                task_file = Path(code.co_filename).relative_to(base_path)
            except ValueError:
                task_file = Path(code.co_filename)
            task_lineno = task.cr_frame.f_lineno
            if task_file != frame_file or task_lineno != frame_lineno:
                # Only mention the task when it differs from the caller
                # location (avoids redundant "in task X at same place").
                task = ' in task {} at {}:{}'.format(task_name,
                                                     task_file,
                                                     task_lineno)
            else:
                task = ''
        app.log.debug('{} {} at {}:{}{}'.format(action,
                                                self._name,
                                                frame_file,
                                                frame_lineno,
                                                task))

    def set(self):
        """Set the event, logging the caller."""
        self._log('Setting')
        super().set()

    def clear(self):
        """Clear the event, logging the caller."""
        self._log('Clearing')
        super().clear()

    async def wait(self):
        """Wait for the event, logging both the wait and the wake-up."""
        self._log('Awaiting')
        await super().wait()
        self._log('Received')
class NamedEvent:
    """
    Event wrapper that manages individual events per name.
    """

    def __init__(self, name):
        self._name = name
        self._events = {}

    def _event(self, name):
        """Return the Event for ``name``, creating it on first use."""
        if name not in self._events:
            full_name = ':'.join([self._name, name])
            self._events[name] = Event(full_name)
        return self._events[name]

    def set(self, name):
        """Set the event associated with ``name``."""
        self._event(name).set()

    def clear(self, name):
        """Clear the event associated with ``name``."""
        self._event(name).clear()

    def is_set(self, name):
        """Return whether the event associated with ``name`` is set."""
        return self._event(name).is_set()

    async def wait(self, name):
        """Wait on the event associated with ``name``."""
        return await self._event(name).wait()
class ShutdownEvent(Event):
    """Event that optionally records a process exit code when set."""

    def set(self, exit_code=None):
        # Store the exit code on the global app config so the shutdown
        # path can report it; None leaves any previously stored code.
        if exit_code is not None:
            app.exit_code = exit_code
        return super().set()
# Application-lifecycle events. Single Events mark global milestones;
# NamedEvents track per-application / per-machine progress by name.
Error = Event('Error')
Shutdown = ShutdownEvent('Shutdown')
MAASConnected = Event('MAASConnected')
Bootstrapped = Event('Bootstrapped')
ModelAvailable = Event('ModelAvailable')
ModelConnected = Event('ModelConnected')
PreDeployComplete = Event('PreDeployComplete')
MachinePending = NamedEvent('MachinePending')
MachineCreated = NamedEvent('MachineCreated')
AppMachinesCreated = NamedEvent('AppMachinesCreated')
AppDeployed = NamedEvent('AppDeployed')
PendingRelations = NamedEvent('PendingRelations')
RelationsAdded = NamedEvent('RelationsAdded')
DeploymentComplete = Event('DeploymentComplete')
ModelSettled = Event('ModelSettled')
PostDeployComplete = Event('PostDeployComplete')
LXDAvailable = Event('LXDAvailable')


# Keep a list of exceptions we know that shouldn't be logged
# into sentry.  Each entry is a predicate taking the exception instance.
NOTRACK_EXCEPTIONS = [
    # Out-of-disk-space is an environment problem, not a bug.
    lambda exc: isinstance(exc, OSError) and exc.errno == errno.ENOSPC,
    lambda exc: isinstance(exc, utils.SudoError),
    lambda exc: isinstance(exc, errors.BootstrapInterrupt),
    lambda exc: isinstance(exc, errors.MAASConfigError),
    lambda exc: isinstance(exc, errors.SchemaError),
    lambda exc: isinstance(exc, errors.LXDError),
]
def unhandled_input(key):
    """Global fallback key handler: q/Q/meta-q quits, R forces a redraw."""
    if key in ('q', 'Q', 'meta q'):
        app.ui.quit()
    if key == 'R':
        EventLoop.redraw_screen()
def handle_exception(loop, context):
    """asyncio exception handler: report the error (unless suppressed)
    and transition the application toward shutdown.

    Args:
        loop: the event loop (unused; required by the handler signature).
        context: asyncio context dict; may contain 'exception' and 'future'.
    """
    exc = context.get('exception')
    if exc is None or isinstance(exc, CancelledError):
        return  # not an error, cleanup message
    if isinstance(exc, ExitMainLoop):
        Shutdown.set()  # use previously stored exit code
        return
    if Error.is_set():
        return  # already reporting an error
    Error.set()
    exc_info = (type(exc), exc, exc.__traceback__)
    # FIX: evaluate the suppression predicates once (previously the
    # NOTRACK_EXCEPTIONS scan ran twice back-to-back).
    untracked = any(pred(exc) for pred in NOTRACK_EXCEPTIONS)
    if untracked:
        app.log.debug('Would not track exception: {}'.format(exc))
    if not (app.no_report or untracked):
        track_exception(str(exc))
        utils.sentry_report(exc_info=exc_info)
    msg = 'Unhandled exception'
    if 'future' in context:
        msg += ' in {}'.format(context['future'])
    app.log.exception(msg, exc_info=exc)
    if app.headless:
        msg = str(exc)
        utils.error(msg)
        Shutdown.set(1)
    else:
        app.exit_code = 1  # store exit code for later
        app.ui.show_exception_message(exc)  # eventually raises ExitMainLoop
async def shutdown_watcher():
    """Long-running task that waits for the Shutdown event, then performs
    orderly cleanup (save state, disconnect Juju, cancel other tasks) and
    finally stops the event loop.
    """
    app.log.info('Watching for shutdown')
    try:
        try:
            await Shutdown.wait()
        except asyncio.CancelledError:
            # Being cancelled is also treated as a request to shut down.
            pass
        app.log.info('Shutting down')
        if app.headless:
            utils.warning('Shutting down')
        # Store application configuration state
        await app.save()
        if app.juju.authenticated:
            app.log.info('Disconnecting model')
            await app.juju.client.disconnect()
            app.log.info('Disconnected')
        if not app.headless:
            EventLoop.remove_alarms()
        for task in asyncio.Task.all_tasks(app.loop):
            # cancel all other tasks
            coro = getattr(task, '_coro', None)
            cr_code = getattr(coro, 'cr_code', None)
            # Identify ourselves by code object so this watcher task is
            # the only one left running.
            if cr_code is not shutdown_watcher.__code__:
                app.log.debug('Cancelling pending task: {}'.format(task))
                task.cancel()
        await asyncio.sleep(0.1)  # give tasks a chance to see the cancel
    except Exception as e:
        # Cleanup must never prevent the loop from stopping; log and
        # continue to loop.stop() below.
        app.log.exception('Error in cleanup code: {}'.format(e))
    app.loop.stop()
| mit | 2e6bf8dc07a30551f2b420a2d64d32aa | 31.146341 | 76 | 0.591958 | 4.028117 | false | false | false | false |
conjure-up/conjure-up | conjureup/controllers/juju/credentials/gui.py | 3 | 4000 | from os import path
import yaml
from conjureup import controllers, juju, utils
from conjureup.app_config import app
from conjureup.consts import CUSTOM_PROVIDERS, cloud_types
from conjureup.ui.views.credentials import (
CredentialPickerView,
NewCredentialView
)
from . import common
class CredentialsController(common.BaseCredentialsController):
    """Pick an existing cloud credential or collect and persist a new one.

    Localhost clouds skip this screen entirely; otherwise the user is
    shown either a picker of existing credentials or a new-credential
    form, and new credentials are written to Juju's credentials.yaml.
    """

    def render(self, going_back=False):
        """Entry point: choose the appropriate screen for the cloud."""
        self.load_credentials()
        if app.provider.cloud_type == cloud_types.LOCAL:
            # no credentials required for localhost
            if going_back:
                self.back()
            else:
                self.finish()
        elif len(self.credentials) >= 1:
            self.render_picker()
        elif not app.provider.credential:
            self.render_form()
        else:
            if going_back:
                self.back()
            else:
                self.finish()

    def render_form(self):
        """Show the new-credential entry form."""
        view = NewCredentialView(self.save_credential, self.switch_views)
        view.show()

    def render_picker(self):
        """Show the existing-credential picker."""
        view = CredentialPickerView(self.credentials, app.provider.credential,
                                    self.select_credential, self.back)
        view.show()

    def switch_views(self):
        """Toggle between the picker and the new-credential form."""
        if self.was_picker:
            self.was_picker = False
            return self.render_picker()
        else:
            self.was_picker = True
            self.render_form()

    def back(self):
        """Return to the cloud-selection screen."""
        return controllers.use('clouds').render(going_back=True)

    def _format_creds(self):
        """ Formats the credentials into strings from the widgets values
        """
        formatted = {'auth-type': app.provider.auth_type}
        for field in app.provider.form.fields():
            if not field.storable:
                continue
            formatted[field.key] = field.value
        return formatted

    def select_credential(self, credential):
        """Picker callback; None means 'create a new credential'."""
        if credential is None:
            self.switch_views()
        else:
            app.provider.credential = credential
            self.finish()

    def save_credential(self):
        """Form submit callback; persist asynchronously."""
        app.loop.create_task(self._save_credential())

    async def _save_credential(self):
        """Write the new credential to credentials.yaml and register any
        custom (MAAS/VSphere) cloud with Juju."""
        cred_path = path.join(utils.juju_path(), 'credentials.yaml')
        app.provider.credential = "conjure-{}-{}".format(app.provider.cloud,
                                                         utils.gen_hash())
        # FIX: was a bare `except:` around `yaml.safe_load(open(...))`,
        # which swallowed every error and leaked the file handle; an empty
        # file (safe_load -> None) would also crash below.
        try:
            with open(cred_path) as cred_f:
                existing_creds = yaml.safe_load(cred_f)
        except (OSError, yaml.YAMLError):
            existing_creds = None
        if not existing_creds:
            existing_creds = {'credentials': {}}
        existing_creds.setdefault('credentials', {})
        if app.provider.cloud in existing_creds['credentials'].keys():
            c = existing_creds['credentials'][app.provider.cloud]
            c[app.provider.credential] = self._format_creds()
        else:
            # Handle the case where path exists but an entry for the cloud
            # has yet to be added.
            existing_creds['credentials'][app.provider.cloud] = {
                app.provider.credential: self._format_creds()
            }
        with open(cred_path, 'w') as cred_f:
            cred_f.write(yaml.safe_dump(existing_creds,
                                        default_flow_style=False))
        # Persist input fields in current provider, this is so we
        # can login to the provider for things like querying VSphere
        # for datacenters before that custom cloud is known to juju.
        await app.provider.save_form()
        # if it's a new MAAS or VSphere cloud, save it now that
        # we have a credential
        if app.provider.cloud_type in CUSTOM_PROVIDERS:
            try:
                juju.get_cloud(app.provider.cloud)
            except LookupError:
                juju.add_cloud(app.provider.cloud,
                               await app.provider.cloud_config())
        # This should return the credential name so juju bootstrap knows
        # which credential to bootstrap with
        self.finish()


_controller_class = CredentialsController
| mit | 19b683932d450909204e42a00789662a | 32.613445 | 78 | 0.584 | 4.376368 | false | false | false | false |
conjure-up/conjure-up | conjureup/bundle.py | 3 | 9430 | """
Bundle class for providing some common utilities when manipulating the bundle
spec
"""
from collections import Mapping
from itertools import chain
import yaml
from conjureup.consts import spell_types
class BundleInvalidApplication(Exception):
    """Raised when an application entry in a bundle is malformed."""
    pass
class BundleInvalidFragment(Exception):
    """Raised when a bundle fragment is missing required data."""
    pass
class BundleApplicationFragment(dict):
    """One application's stanza from a bundle, with convenience accessors
    for commonly tweaked fields (constraints, unit count, options)."""

    def __init__(self, name, *args, **kwargs):
        self.name = name
        super().__init__(*args, **kwargs)
        self._constraints = self.get('constraints', "")
        self._num_units = int(self.get('num_units', 0))
        self._options = self.get('options', {})

    @property
    def constraints(self):
        """Juju constraints string for this application."""
        return self._constraints

    @constraints.setter
    def constraints(self, val):
        self._constraints = val

    @property
    def num_units(self):
        """Number of units to deploy."""
        return self._num_units

    @num_units.setter
    def num_units(self, val):
        self._num_units = val

    @property
    def options(self):
        """Charm configuration options."""
        return self._options

    @options.setter
    def options(self, val):
        # NOTE: merges into any existing options rather than replacing.
        self._options.update(val)

    @property
    def charm(self):
        """Charmstore endpoint for this application."""
        if 'charm' in self:
            return self['charm']
        raise BundleInvalidFragment("Unable to locate 'charm' in "
                                    "bundle fragment: {}".format(self))

    @property
    def is_subordinate(self):
        """True when the fragment deploys no units of its own."""
        return self.num_units == 0

    @property
    def to(self):
        """Machine placement directives."""
        return self.get('to', [])

    def to_dict(self):
        """Serialize back into a plain bundle-style mapping, omitting
        empty optional fields."""
        serialized = {
            'charm': self.charm,
            'num_units': self.num_units,
        }
        for key, value in (('options', self.options),
                           ('to', self.to),
                           ('constraints', self.constraints)):
            if value:
                serialized[key] = value
        expose = self.get('expose', False)
        if expose:
            serialized['expose'] = expose
        return serialized
class SnapBundleApplicationFragment(dict):
    """A snap application stanza from a bundle, exposing snap name,
    channel, confinement, and options as attributes."""

    def __init__(self, name, *args, **kwargs):
        self.name = name
        super().__init__(*args, **kwargs)
        # Snap name defaults to the fragment name when not given.
        self._snap = self.get('snap', name)
        self._channel = self.get('channel', 'stable')
        self._options = self.get('options', {})
        self._confinement = self.get('confinement', None)

    @property
    def snap(self):
        """Name of the snap to install."""
        return self._snap

    @snap.setter
    def snap(self, val):
        self._snap = val

    @property
    def confinement(self):
        """Snap confinement mode (or None for the snap's default)."""
        return self._confinement

    @confinement.setter
    def confinement(self, val):
        self._confinement = val

    @property
    def channel(self):
        """Snap store channel to track."""
        return self._channel

    @channel.setter
    def channel(self, val):
        self._channel = val

    @property
    def options(self):
        """Snap configuration options."""
        return self._options

    @options.setter
    def options(self, val):
        # NOTE: merges into any existing options rather than replacing.
        self._options.update(val)

    def to_dict(self):
        # NOTE(review): returns the raw underlying mapping, ignoring any
        # values changed via the property setters -- confirm intended.
        return self
class Bundle(dict):
    """In-memory representation of a bundle specification.

    Normalizes legacy keys ('services' -> 'applications') and supports
    merging and subtracting bundle fragments for spell composition, plus
    convenient access to applications, machines, and relations.
    """

    def __init__(self, bundle, spell_type=spell_types.JUJU):
        self.spell_type = spell_type
        super().__init__(self._normalize_bundle(bundle))

    def _normalize_bundle(self, bundle):
        """ Normalizes bundle for things
        like applications vs. services
        """
        new_bundle = {}
        for k, v in bundle.items():
            # Older bundles use 'services'; canonicalize to 'applications'.
            if k == 'services':
                new_bundle['applications'] = v
            else:
                new_bundle[k] = v
        return new_bundle

    def to_yaml(self):
        """ Returns yaml dump of bundle
        """
        return yaml.dump(self.to_dict(),
                         default_flow_style=False)

    def to_dict(self):
        """ Returns dictionary representation
        """
        return dict(self)

    def _merge_dicts(self, *dicts):
        """
        Return a new dictionary that is the result of merging the arguments
        together.

        In case of conflicts, later arguments take precedence over earlier
        arguments.

        ref: http://stackoverflow.com/a/8795331/3170835
        """
        # 'Mapping' moved to collections.abc in Python 3.3 and the
        # deprecated 'collections.Mapping' alias was removed in Python
        # 3.10, so import it from its canonical location here.
        from collections.abc import Mapping

        updated = {}
        # grab all keys
        keys = set()
        for d in dicts:
            keys = keys.union(set(d))

        for key in keys:
            values = [d[key] for d in dicts if key in d]
            # which ones are mapping types? (aka dict)
            maps = [value for value in values
                    if isinstance(value, Mapping)]
            lists = [value for value in values
                     if isinstance(value, (list, tuple))]
            if maps:
                # if we have any mapping types, call recursively to merge them
                updated[key] = self._merge_dicts(*maps)
            elif lists:
                # if any values are lists, we want to merge them
                # (non-recursively) first, ensure all values are lists
                for i in range(len(values)):
                    if not isinstance(values[i], (list, tuple)):
                        values[i] = [values[i]]
                # then, merge all of the lists into a single list
                updated[key] = list(chain.from_iterable(values))
            else:
                # otherwise, just grab the last value we have, since later
                # arguments take precedence over earlier arguments
                updated[key] = values[-1]
        return updated

    def _subtract_dicts(self, *dicts):
        """
        Return a new dictionary that is the result of subtracting each dict
        from the previous. Except for mappings, the values of the subsequent
        are ignored and simply all matching keys are removed. If the value is
        a mapping, however, then only the keys from the sub-mapping are
        removed, recursively.
        """
        # See note in _merge_dicts: use the non-deprecated ABC location.
        from collections.abc import Mapping

        result = self._merge_dicts(dicts[0], {})  # make a deep copy
        for d in dicts[1:]:
            for key, value in d.items():
                if key not in result:
                    continue
                if isinstance(value, Mapping):
                    result[key] = self._subtract_dicts(result[key], value)
                    if not result[key]:
                        # we removed everything from the mapping,
                        # so remove the whole thing
                        del result[key]
                elif isinstance(value, (list, tuple)):
                    if not isinstance(result[key], (list, tuple)):
                        # if the original value isn't a list, then remove it
                        # if it matches any of the values in the given list
                        if result[key] in value:
                            del result[key]
                    else:
                        # for lists, remove any matching
                        # items (non-recursively)
                        result[key] = [item
                                       for item in result[key]
                                       if item not in value]
                        if not result[key]:
                            # we removed everything from the list,
                            # so remove the whole thing
                            del result[key]
                else:
                    del result[key]
        return result

    def apply(self, fragment):
        """ Applies bundle fragment to bundle, overwriting
        any preexisting values
        """
        _fragment = self._normalize_bundle(fragment)
        result = self._merge_dicts(self, _fragment)
        self.clear()
        self.update(result)

    def subtract(self, fragment):
        """ Subtracts a bundle fragment from existing bundle
        """
        _fragment = self._normalize_bundle(fragment)
        result = self._subtract_dicts(self, _fragment)
        self.clear()
        self.update(result)

    @property
    def applications(self):
        """ Returns list of applications/services
        """
        return [self._get_application_fragment(app)
                for app in self['applications'].keys()]

    @property
    def machines(self):
        """ Returns defined machines
        """
        return self.get('machines', [])

    @property
    def relations(self):
        """ Returns application relations
        """
        return self.get('relations', [])

    def _get_application_fragment(self, app_name):
        """ Returns bundle fragment for the named application.

        Raises:
            BundleInvalidApplication: if the application is not in the bundle.
        """
        if app_name not in self['applications']:
            raise BundleInvalidApplication(
                "Unable find a bundle fragment for: {}".format(app_name))
        _fragment = self['applications'][app_name]
        # Snap spells wrap fragments in the snap-aware class.
        if self.spell_type == spell_types.SNAP:
            return SnapBundleApplicationFragment(app_name, _fragment)
        return BundleApplicationFragment(app_name, _fragment)
| mit | 5f9bf787675c5e6d879232a6accb4d7b | 29.616883 | 78 | 0.537116 | 4.668317 | false | false | false | false |
stripe/stripe-python | stripe/util.py | 1 | 7795 | from __future__ import absolute_import, division, print_function
import functools
import hmac
import io
import logging
import sys
import os
import re
import stripe
from stripe import six
from stripe.six.moves.urllib.parse import parse_qsl, quote_plus
STRIPE_LOG = os.environ.get("STRIPE_LOG")
logger = logging.getLogger("stripe")
__all__ = [
"io",
"parse_qsl",
"utf8",
"log_info",
"log_debug",
"dashboard_link",
"logfmt",
]
def utf8(value):
    """Encode text to UTF-8 bytes on Python 2; return unchanged otherwise."""
    needs_encoding = six.PY2 and isinstance(value, six.text_type)
    return value.encode("utf-8") if needs_encoding else value
def is_appengine_dev():
    """Return True when running under the App Engine development server."""
    if "APPENGINE_RUNTIME" not in os.environ:
        return False
    # The dev server advertises itself via SERVER_SOFTWARE, e.g.
    # "Development/2.0".
    return "Dev" in os.environ.get("SERVER_SOFTWARE", "")
def _console_log_level():
    """Return 'debug' or 'info' when console logging is enabled, else None.

    The programmatic setting (stripe.log) takes precedence over the
    STRIPE_LOG environment variable.
    """
    for level in (stripe.log, STRIPE_LOG):
        if level in ["debug", "info"]:
            return level
    return None
def log_debug(message, **params):
    """Emit a debug record, echoing to stderr when console level is debug."""
    line = logfmt(dict(message=message, **params))
    if _console_log_level() == "debug":
        print(line, file=sys.stderr)
    logger.debug(line)
def log_info(message, **params):
    """Emit an info record, echoing to stderr in debug/info console mode."""
    line = logfmt(dict(message=message, **params))
    if _console_log_level() in ["debug", "info"]:
        print(line, file=sys.stderr)
    logger.info(line)
def _test_or_live_environment():
    """Return 'live' or 'test' based on the configured secret key prefix.

    Returns None when no API key is set or the key has an unexpected shape.
    """
    if stripe.api_key is None:
        return None
    match = re.match(r"sk_(live|test)_", stripe.api_key)
    return match.group(1) if match else None
def dashboard_link(request_id):
    """Build the Stripe Dashboard logs URL for a given request id."""
    environment = _test_or_live_environment() or "test"
    return "https://dashboard.stripe.com/{env}/logs/{reqid}".format(
        env=environment, reqid=request_id
    )
def logfmt(props):
    """Render ``props`` as a logfmt line: sorted ``key=value`` pairs."""

    def fmt(key, val):
        # Handle case where val is a bytes or bytesarray
        if six.PY3 and hasattr(val, "decode"):
            val = val.decode("utf-8")
        # Check if val is already a string to avoid re-encoding into
        # ascii. Since the code is sent through 2to3, we can't just
        # use unicode(val, encoding='utf8') since it will be
        # translated incorrectly.
        if not isinstance(val, six.string_types):
            val = six.text_type(val)
        # Quote values containing whitespace so the line stays parseable.
        if re.search(r"\s", val):
            val = repr(val)
        # key should already be a string
        if re.search(r"\s", key):
            key = repr(key)
        return u"{key}={val}".format(key=key, val=val)

    return u" ".join([fmt(key, val) for key, val in sorted(props.items())])
# Borrowed from Django's source code
if hasattr(hmac, "compare_digest"):
    # Prefer the stdlib implementation, when available.
    def secure_compare(val1, val2):
        """Constant-time equality check backed by hmac.compare_digest."""
        return hmac.compare_digest(utf8(val1), utf8(val2))

else:

    def secure_compare(val1, val2):
        """
        Returns True if the two strings are equal, False otherwise.

        The time taken is independent of the number of characters that match.

        For the sake of simplicity, this function executes in constant time
        only when the two strings have the same length. It short-circuits when
        they have different lengths.
        """
        val1, val2 = utf8(val1), utf8(val2)
        if len(val1) != len(val2):
            return False
        # OR all byte-wise XORs together so every position is examined
        # regardless of where the first mismatch occurs.
        result = 0
        if six.PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
            # On Python 3, iterating bytes yields ints directly.
            for x, y in zip(val1, val2):
                result |= x ^ y
        else:
            for x, y in zip(val1, val2):
                result |= ord(x) ^ ord(y)
        return result == 0
def get_object_classes():
    """Return the registry mapping API object names to resource classes."""
    # This is here to avoid a circular dependency
    from stripe.object_classes import OBJECT_CLASSES

    return OBJECT_CLASSES
def convert_to_stripe_object(
    resp, api_key=None, stripe_version=None, stripe_account=None, params=None
):
    """Recursively convert an API response into StripeObject instances.

    Lists are converted element-wise; dicts are instantiated as the
    resource class named by their ``object`` field (falling back to the
    generic StripeObject); anything else is returned unchanged.
    """
    # If we get a StripeResponse, we'll want to return a
    # StripeObject with the last_response field filled out with
    # the raw API response information
    stripe_response = None

    if isinstance(resp, stripe.stripe_response.StripeResponse):
        stripe_response = resp
        resp = stripe_response.data

    if isinstance(resp, list):
        return [
            convert_to_stripe_object(
                i, api_key, stripe_version, stripe_account
            )
            for i in resp
        ]
    elif isinstance(resp, dict) and not isinstance(
        resp, stripe.stripe_object.StripeObject
    ):
        # Copy before mutating so the caller's dict is untouched.
        resp = resp.copy()
        klass_name = resp.get("object")
        if isinstance(klass_name, six.string_types):
            klass = get_object_classes().get(
                klass_name, stripe.stripe_object.StripeObject
            )
        else:
            klass = stripe.stripe_object.StripeObject
        obj = klass.construct_from(
            resp,
            api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            last_response=stripe_response,
        )

        # We only need to update _retrieve_params when special params were
        # actually passed. Otherwise, leave it as is as the list / search result
        # constructors will instantiate their own params.
        if (
            params is not None
            and hasattr(obj, "object")
            and ((obj.object == "list") or (obj.object == "search_result"))
        ):
            obj._retrieve_params = params

        return obj
    else:
        return resp
def convert_to_dict(obj):
    """Converts a StripeObject back to a regular dict.

    Nested StripeObjects are also converted back to regular dicts.

    :param obj: The StripeObject to convert.

    :returns: The StripeObject as a dict.
    """
    if isinstance(obj, list):
        return [convert_to_dict(item) for item in obj]
    if isinstance(obj, dict):
        # This works by virtue of the fact that StripeObjects _are_ dicts:
        # the dict comprehension yields a plain dict while recursing into
        # every value.
        return {key: convert_to_dict(value)
                for key, value in six.iteritems(obj)}
    return obj
def populate_headers(idempotency_key):
    """Build request headers carrying the idempotency key, if one was given.

    Returns None when no key is supplied, so callers can skip the header.
    """
    if idempotency_key is None:
        return None
    return {"Idempotency-Key": idempotency_key}
def read_special_variable(params, key_name, default_value):
    """Pop ``key_name`` out of ``params`` and resolve its value.

    The explicitly supplied ``default_value`` wins; the value popped from
    ``params`` is used only when no explicit value was given. The key is
    removed from ``params`` either way.
    """
    popped = None
    if params is not None and key_name in params:
        popped = params.pop(key_name)
    if default_value is not None:
        return default_value
    return popped
def merge_dicts(x, y):
    """Return a new dict: a shallow copy of ``x`` overlaid with ``y``."""
    merged = x.copy()
    merged.update(y)
    return merged
def sanitize_id(id):
    """URL-quote an object id so it is safe to embed in a request path."""
    return quote_plus(utf8(id))
class class_method_variant(object):
    """Decorator-descriptor allowing one name to serve as both an
    instance method and a class method.

    Accessed on an instance, the decorated method runs; accessed on the
    class, the call dispatches to the class method named at construction.
    """

    def __init__(self, class_method_name):
        # Name of the class method to fall back to for class-level calls.
        self.class_method_name = class_method_name

    def __call__(self, method):
        # Used as a decorator: capture the instance-method implementation.
        self.method = method
        return self

    def __get__(self, obj, objtype=None):
        @functools.wraps(self.method)
        def _wrapper(*args, **kwargs):
            if obj is not None:
                # Method was called as an instance method, e.g.
                # instance.method(...)
                return self.method(obj, *args, **kwargs)
            elif len(args) > 0 and isinstance(args[0], objtype):
                # Method was called as a class method with the instance as the
                # first argument, e.g. Class.method(instance, ...) which in
                # Python is the same thing as calling an instance method
                return self.method(args[0], *args[1:], **kwargs)
            else:
                # Method was called as a class method, e.g. Class.method(...)
                class_method = getattr(objtype, self.class_method_name)
                return class_method(*args, **kwargs)

        return _wrapper
| mit | 6c3759e62a89c0bd4ffcc90eefda7577 | 27.977695 | 80 | 0.605773 | 3.767521 | false | false | false | false |
stripe/stripe-python | tests/api_resources/abstract/test_searchable_api_resource.py | 1 | 3046 | from __future__ import absolute_import, division, print_function
import stripe
class TestSearchableAPIResource(object):
    """Tests for the generic search plumbing on SearchableAPIResource."""

    class MySearchable(stripe.api_resources.abstract.SearchableAPIResource):
        # Minimal concrete subclass used to exercise the _search helper.
        OBJECT_NAME = "mysearchable"

        @classmethod
        def search(cls, *args, **kwargs):
            return cls._search(
                search_url="/v1/mysearchables/search", *args, **kwargs
            )

    def test_search(self, request_mock):
        # Single-page result: data should come back typed as Charge
        # objects and the raw response metadata should be preserved.
        request_mock.stub_request(
            "get",
            "/v1/mysearchables/search",
            {
                "object": "search_result",
                "data": [
                    {"object": "charge", "name": "jose"},
                    {"object": "charge", "name": "curly"},
                ],
                "url": "/v1/charges",
                "has_more": False,
                "next_page": None,
            },
            rheaders={"request-id": "req_id"},
        )

        res = self.MySearchable.search(query='currency:"CAD"')
        request_mock.assert_requested("get", "/v1/mysearchables/search", {})
        assert len(res.data) == 2
        assert all(isinstance(obj, stripe.Charge) for obj in res.data)
        assert res.data[0].name == "jose"
        assert res.data[1].name == "curly"

        assert res.last_response is not None
        assert res.last_response.request_id == "req_id"

    def test_search_multiple_pages(self, request_mock):
        # First page advertises has_more with a next_page token.
        request_mock.stub_request(
            "get",
            "/v1/mysearchables/search",
            {
                "object": "search_result",
                "data": [
                    {"object": "charge", "name": "jose"},
                    {"object": "charge", "name": "curly"},
                ],
                "url": "/v1/charges",
                "has_more": True,
                "next_page": "next-page-token",
            },
            rheaders={"request-id": "req_id"},
        )

        res = self.MySearchable.search(query='currency:"CAD"')
        request_mock.assert_requested(
            "get", "/v1/mysearchables/search", {"query": 'currency:"CAD"'}
        )
        assert res.next_page == "next-page-token"

        # Second page: the token must be echoed back as the `page` param.
        request_mock.stub_request(
            "get",
            "/v1/mysearchables/search",
            {
                "object": "list",
                "data": [
                    {"object": "charge", "name": "test"},
                ],
                "url": "/v1/charges",
                "has_more": False,
                "next_page": None,
            },
            rheaders={"request-id": "req_id"},
        )

        res2 = self.MySearchable.search(
            query='currency:"CAD"', page=res.next_page
        )
        request_mock.assert_requested(
            "get",
            "/v1/mysearchables/search",
            {"page": "next-page-token", "query": 'currency:"CAD"'},
        )
        assert len(res2.data) == 1
        assert all(isinstance(obj, stripe.Charge) for obj in res2.data)
        assert res2.data[0].name == "test"
| mit | 69b6ff36bc07f966ee4c7bc669ed9cb4 | 32.108696 | 76 | 0.478332 | 3.950713 | false | true | false | false |
stripe/stripe-python | stripe/api_resources/cash_balance.py | 1 | 1070 | # -*- coding: utf-8 -*-
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import APIResource
from stripe.api_resources.customer import Customer
from stripe.six.moves.urllib.parse import quote_plus
class CashBalance(APIResource):
    """
    A customer's `Cash balance` represents real funds. Customers can add funds to their cash balance by sending a bank transfer. These funds can be used for payment and can eventually be paid out to your bank account.
    """

    OBJECT_NAME = "cash_balance"

    def instance_url(self):
        """Build the API path for this customer's cash balance."""
        # Cash balances are only addressable underneath their customer.
        return "%s/%s/cash_balance" % (
            Customer.class_url(),
            quote_plus(util.utf8(self.customer)),
        )

    @classmethod
    def retrieve(cls, id, api_key=None, **params):
        """Unsupported: a cash balance cannot be fetched by bare id."""
        raise NotImplementedError(
            "Can't retrieve a Customer Cash Balance without a Customer ID. "
            "Use Customer.retrieve_cash_balance('cus_123')"
        )
| mit | e0d5a76d683b68a5752f69ae3a9d61bd | 35.896552 | 217 | 0.692523 | 3.848921 | false | false | false | false |
stripe/stripe-python | stripe/six.py | 4 | 34581 | # Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.16.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
if PY34:
from importlib.util import spec_from_loader
else:
spec_from_loader = None
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    __import__(name)
    # __import__ returns the top-level package; fetch the fully-dotted
    # submodule from sys.modules instead.
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value lazily on first attribute access
    and then replaces itself with the resolved value."""

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy descriptor for a module that was renamed between Python 2 and 3."""

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            # On Python 3, default the new name to the exposed name.
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        # Import the version-appropriate module chosen in __init__.
        return _import_module(self.mod)

    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        # Cache the attribute so future lookups bypass __getattr__.
        setattr(self, attr, value)
        return value
class _LazyModule(types.ModuleType):

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        # Expose the subclass docstring as the module docstring.
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        # Advertise the lazily-provided attributes alongside the basics.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy descriptor for an attribute that moved between modules
    (and possibly changed name) between Python 2 and 3."""

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            # Default the new attribute name to the old one, then to `name`.
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):

    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """

    def __init__(self, six_module_name):
        self.name = six_module_name
        # Maps fully-qualified names ("six.moves.foo") to module objects
        # or MovedModule descriptors.
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        # Register `mod` under one or more aliases below this package.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        # Legacy PEP 302 finder hook.
        if fullname in self.known_modules:
            return self
        return None

    def find_spec(self, fullname, path, target=None):
        # PEP 451 finder hook (Python 3.4+).
        if fullname in self.known_modules:
            return spec_from_loader(fullname, self)
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Resolve lazy module descriptors to the real module.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code

    def create_module(self, spec):
        # PEP 451 loader hook: delegate to the legacy load_module path.
        return self.load_module(spec.name)

    def exec_module(self, module):
        # Nothing to execute; load_module already fully initialized it.
        pass
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections", "IterableUserDict", "UserDict"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package so "import six.moves.urllib.<sub>" resolves
    # Each attribute is the lazily-loading submodule previously registered
    # with _importer, exposed under its Python 3 name.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")
    def __dir__(self):
        # Only the five urllib submodules form the public namespace.
        return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    ``move`` is a MovedAttribute/MovedModule; after registration it is
    reachable as ``six.moves.<move.name>``.
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    # Try the lazy attribute on _MovedItems first; if it was never
    # registered there, fall back to the realized module namespace.
    try:
        delattr(_MovedItems, name)
        return
    except AttributeError:
        pass
    try:
        del moves.__dict__[name]
    except KeyError:
        raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
del io
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
_assertNotRegex = "assertNotRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Version-neutral assertCountEqual (assertItemsEqual on Python 2)."""
    return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
    """Version-neutral assertRaisesRegex (assertRaisesRegexp on older Pythons)."""
    return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
    """Version-neutral assertRegex (assertRegexpMatches on older Pythons)."""
    return getattr(self, _assertRegex)(*args, **kwargs)
def assertNotRegex(self, *args, **kwargs):
    """Version-neutral assertNotRegex (assertNotRegexpMatches on older Pythons)."""
    return getattr(self, _assertNotRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] > (3,):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
# This does exactly the same what the :func:`py3:functools.update_wrapper`
# function does on Python versions after 3.2. It sets the ``__wrapped__``
# attribute on ``wrapper`` object and it doesn't raise an error if any of
# the attributes mentioned in ``assigned`` and ``updated`` are missing on
# ``wrapped`` object.
def _update_wrapper(wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
continue
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
wrapper.__wrapped__ = wrapped
return wrapper
_update_wrapper.__doc__ = functools.update_wrapper.__doc__
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
return functools.partial(_update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
wraps.__doc__ = functools.wraps.__doc__
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass.

    Returns a throwaway class whose own metaclass intercepts the first
    subclassing step and rebuilds the subclass directly with ``meta`` and
    ``bases``, so the temporary class never appears in the final MRO.
    """
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(type):
        def __new__(cls, name, this_bases, d):
            if sys.version_info[:2] >= (3, 7):
                # This version introduced PEP 560 that requires a bit
                # of extra care (we mimic what is done by __build_class__).
                resolved_bases = types.resolve_bases(bases)
                if resolved_bases is not bases:
                    d['__orig_bases__'] = bases
            else:
                resolved_bases = bases
            return meta(name, resolved_bases, d)
        @classmethod
        def __prepare__(cls, name, this_bases):
            # Delegate namespace preparation to the real metaclass so
            # __prepare__-supplied custom namespaces keep working.
            return meta.__prepare__(name, bases)
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    The decorated class is re-created through *metaclass* with the same
    name, bases and (copied) namespace, so it behaves as if it had been
    declared with that metaclass in the first place.
    """
    def wrapper(cls):
        namespace = dict(cls.__dict__)
        declared_slots = namespace.get('__slots__')
        if declared_slots is not None:
            # __slots__ may be a single name or an iterable of names; the
            # slot descriptors themselves must not be carried over, or the
            # re-created class would shadow its own slots.
            slot_names = (
                [declared_slots]
                if isinstance(declared_slots, str)
                else declared_slots
            )
            for slot_name in slot_names:
                del namespace[slot_name]
        # These entries are created automatically per-class and must not be
        # copied into the new namespace.
        namespace.pop('__dict__', None)
        namespace.pop('__weakref__', None)
        if hasattr(cls, '__qualname__'):
            namespace['__qualname__'] = cls.__qualname__
        return metaclass(cls.__name__, cls.__bases__, namespace)
    return wrapper
def ensure_binary(s, encoding='utf-8', errors='strict'):
    """Coerce **s** to six.binary_type.
    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`
    For Python 3:
      - `str` -> encoded to `bytes`
      - `bytes` -> `bytes`
    """
    # text_type and binary_type are disjoint on both major versions, so the
    # order of the checks does not affect the result.
    if isinstance(s, text_type):
        return s.encode(encoding, errors)
    if not isinstance(s, binary_type):
        raise TypeError("not expecting type '%s'" % type(s))
    return s
def ensure_str(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to `str`.
    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`
    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`
    """
    # Fast path: the value is exactly `str` already.
    if type(s) is str:
        return s
    if not isinstance(s, (text_type, binary_type)):
        raise TypeError("not expecting type '%s'" % type(s))
    # Only one of these branches can apply on a given interpreter; a str
    # subclass (or a native-text instance) falls through unchanged.
    if PY2 and isinstance(s, text_type):
        return s.encode(encoding, errors)
    if PY3 and isinstance(s, binary_type):
        return s.decode(encoding, errors)
    return s
def ensure_text(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to six.text_type.
    For Python 2:
      - `unicode` -> `unicode`
      - `str` -> `unicode`
    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`
    """
    # Guard-clause form: the two type families are disjoint, so check text
    # first and decode binary, otherwise reject.
    if isinstance(s, text_type):
        return s
    if isinstance(s, binary_type):
        return s.decode(encoding, errors)
    raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
    """
    A class decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.
    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not PY2:
        # Python 3: __str__ already returns text; leave the class alone.
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Move the text-returning __str__ to __unicode__ and install a
    # byte-returning __str__ on top of it, as Python 2 expects.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
| mit | f8672965ef67411bf2f2b64ab91d455d | 33.650301 | 118 | 0.624765 | 4.013114 | false | false | false | false |
okfn/website | aldryn_video/migrations/0001_initial.py | 2 | 1862 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
    # Auto-generated initial migration for aldryn_video: creates the
    # OEmbedVideoPlugin model as a django CMS plugin (multi-table
    # inheritance from cms.CMSPlugin). Do not edit field definitions by
    # hand; generate follow-up migrations instead.
    dependencies = [
        ('cms', '0015_auto_20160421_0000'),
        ('contenttypes', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='OEmbedVideoPlugin',
            fields=[
                # Parent link back to the base CMSPlugin row.
                ('cmsplugin_ptr', models.OneToOneField(
                    parent_link=True,
                    auto_created=True,
                    primary_key=True,
                    serialize=False,
                    to='cms.CMSPlugin',
                    on_delete=models.CASCADE,
                )),
                ('url', models.URLField(help_text='vimeo and youtube supported.', max_length=100, verbose_name='URL')),
                ('width', models.IntegerField(null=True, verbose_name='Width', blank=True)),
                ('height', models.IntegerField(null=True, verbose_name='Height', blank=True)),
                ('iframe_width', models.CharField(max_length=15, verbose_name='iframe width', blank=True)),
                ('iframe_height', models.CharField(max_length=15, verbose_name='iframe height', blank=True)),
                ('auto_play', models.BooleanField(default=False, verbose_name='auto play')),
                ('loop_video', models.BooleanField(default=False, help_text='when true, the video repeats itself when over.', verbose_name='loop')),
                # Raw oEmbed provider response, cached at save time.
                ('oembed_data', jsonfield.fields.JSONField(null=True)),
                ('custom_params', models.CharField(help_text='define custom params (e.g. "start=10&end=50")', max_length=200, verbose_name='custom params', blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
| mit | e497aa553c7f681e476666e8be6b9450 | 43.333333 | 169 | 0.559076 | 4.391509 | false | false | false | false |
stripe/stripe-python | stripe/api_resources/payment_intent.py | 1 | 6998 | # -*- coding: utf-8 -*-
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
from stripe.api_resources.abstract import SearchableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
class PaymentIntent(
    CreateableAPIResource,
    ListableAPIResource,
    SearchableAPIResource,
    UpdateableAPIResource,
):
    """
    A PaymentIntent guides you through the process of collecting a payment from your customer.
    We recommend that you create exactly one PaymentIntent for each order or
    customer session in your system. You can reference the PaymentIntent later to
    see the history of payment attempts for a particular session.
    A PaymentIntent transitions through
    [multiple statuses](https://stripe.com/docs/payments/intents#intent-statuses)
    throughout its lifetime as it interfaces with Stripe.js to perform
    authentication flows and ultimately creates at most one successful charge.
    Related guide: [Payment Intents API](https://stripe.com/docs/payments/payment-intents).
    """
    OBJECT_NAME = "payment_intent"
    # NOTE: generated from the OpenAPI spec. Each API action below comes in
    # two flavors: a private classmethod `_cls_<action>(intent, ...)` taking
    # the intent id, and a public instance method `<action>(self, ...)` that
    # reads the id from the retrieved object. util.class_method_variant lets
    # the public name be called either way.
    @classmethod
    def _cls_apply_customer_balance(
        cls,
        intent,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "post",
            "/v1/payment_intents/{intent}/apply_customer_balance".format(
                intent=util.sanitize_id(intent)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_apply_customer_balance")
    def apply_customer_balance(self, idempotency_key=None, **params):
        # POST /v1/payment_intents/{intent}/apply_customer_balance
        return self._request(
            "post",
            "/v1/payment_intents/{intent}/apply_customer_balance".format(
                intent=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def _cls_cancel(
        cls,
        intent,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "post",
            "/v1/payment_intents/{intent}/cancel".format(
                intent=util.sanitize_id(intent)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_cancel")
    def cancel(self, idempotency_key=None, **params):
        # POST /v1/payment_intents/{intent}/cancel
        return self._request(
            "post",
            "/v1/payment_intents/{intent}/cancel".format(
                intent=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def _cls_capture(
        cls,
        intent,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "post",
            "/v1/payment_intents/{intent}/capture".format(
                intent=util.sanitize_id(intent)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_capture")
    def capture(self, idempotency_key=None, **params):
        # POST /v1/payment_intents/{intent}/capture
        return self._request(
            "post",
            "/v1/payment_intents/{intent}/capture".format(
                intent=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def _cls_confirm(
        cls,
        intent,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "post",
            "/v1/payment_intents/{intent}/confirm".format(
                intent=util.sanitize_id(intent)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_confirm")
    def confirm(self, idempotency_key=None, **params):
        # POST /v1/payment_intents/{intent}/confirm
        return self._request(
            "post",
            "/v1/payment_intents/{intent}/confirm".format(
                intent=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def _cls_increment_authorization(
        cls,
        intent,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "post",
            "/v1/payment_intents/{intent}/increment_authorization".format(
                intent=util.sanitize_id(intent)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_increment_authorization")
    def increment_authorization(self, idempotency_key=None, **params):
        # POST /v1/payment_intents/{intent}/increment_authorization
        return self._request(
            "post",
            "/v1/payment_intents/{intent}/increment_authorization".format(
                intent=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def _cls_verify_microdeposits(
        cls,
        intent,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "post",
            "/v1/payment_intents/{intent}/verify_microdeposits".format(
                intent=util.sanitize_id(intent)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_verify_microdeposits")
    def verify_microdeposits(self, idempotency_key=None, **params):
        # POST /v1/payment_intents/{intent}/verify_microdeposits
        return self._request(
            "post",
            "/v1/payment_intents/{intent}/verify_microdeposits".format(
                intent=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def search(cls, *args, **kwargs):
        # GET /v1/payment_intents/search (see SearchableAPIResource._search).
        return cls._search(
            search_url="/v1/payment_intents/search", *args, **kwargs
        )
    @classmethod
    def search_auto_paging_iter(cls, *args, **kwargs):
        # Convenience wrapper: iterate over all search results across pages.
        return cls.search(*args, **kwargs).auto_paging_iter()
| mit | f67885628e08acb9e10a7f3fc6fe69fc | 29.692982 | 94 | 0.573592 | 4.068605 | false | false | false | false |
stripe/stripe-python | stripe/api_resources/financial_connections/account.py | 1 | 3222 | # -*- coding: utf-8 -*-
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import ListableAPIResource
class Account(ListableAPIResource):
    """
    A Financial Connections Account represents an account that exists outside of Stripe, to which you have been granted some degree of access.
    """
    OBJECT_NAME = "financial_connections.account"
    # NOTE: generated from the OpenAPI spec. Each action is emitted twice: a
    # private classmethod `_cls_<action>(account, ...)` taking the account id
    # and a public instance method reading the id from the retrieved object;
    # util.class_method_variant allows calling the public name either way.
    @classmethod
    def _cls_disconnect(
        cls,
        account,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "post",
            "/v1/financial_connections/accounts/{account}/disconnect".format(
                account=util.sanitize_id(account)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_disconnect")
    def disconnect(self, idempotency_key=None, **params):
        # POST /v1/financial_connections/accounts/{account}/disconnect
        return self._request(
            "post",
            "/v1/financial_connections/accounts/{account}/disconnect".format(
                account=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def _cls_list_owners(
        cls,
        account,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "get",
            "/v1/financial_connections/accounts/{account}/owners".format(
                account=util.sanitize_id(account)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_list_owners")
    def list_owners(self, idempotency_key=None, **params):
        # GET /v1/financial_connections/accounts/{account}/owners
        return self._request(
            "get",
            "/v1/financial_connections/accounts/{account}/owners".format(
                account=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def _cls_refresh_account(
        cls,
        account,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        return cls._static_request(
            "post",
            "/v1/financial_connections/accounts/{account}/refresh".format(
                account=util.sanitize_id(account)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_refresh_account")
    def refresh_account(self, idempotency_key=None, **params):
        # POST /v1/financial_connections/accounts/{account}/refresh
        return self._request(
            "post",
            "/v1/financial_connections/accounts/{account}/refresh".format(
                account=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
| mit | fa4455cb8d8ab4a87894a487801b55ca | 29.11215 | 142 | 0.563625 | 4.130769 | false | false | false | false |
okfn/website | foundation/organisation/cms_plugins.py | 1 | 4101 | from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from cms.extensions.extension_pool import extension_pool
from .models import (Project, Theme, FeaturedTheme, FeaturedProject,
ProjectList, NetworkGroup, NetworkGroupList, WorkingGroup,
SideBarExtension)
class FeaturedThemePlugin(CMSPluginBase):
    """Render a single editor-selected theme as a feature box."""

    model = FeaturedTheme
    module = "OKF"
    name = _("Featured Theme")
    text_enabled = True
    render_template = "organisation/theme_featured.html"

    def icon_alt(self, instance):
        # Alt text shown for the plugin placeholder in the text editor.
        return 'Theme: %s' % instance.theme.name

    def icon_src(self, instance):
        # Generic snippet icon shipped with django CMS.
        icon_path = "cms/img/icons/plugins/snippet.png"
        return settings.STATIC_URL + icon_path

    def render(self, context, instance, placeholder):
        rendered = super().render(context, instance, placeholder)
        rendered['object'] = instance.theme
        return rendered
plugin_pool.register_plugin(FeaturedThemePlugin)
class FeaturedProjectPlugin(CMSPluginBase):
    """Render a single editor-selected project as a feature box."""

    model = FeaturedProject
    module = "OKF"
    name = _("Featured Project")
    render_template = "organisation/project_featured.html"

    def render(self, context, instance, placeholder):
        ctx = super().render(context, instance, placeholder)
        ctx['project'] = instance.project
        return ctx
plugin_pool.register_plugin(FeaturedProjectPlugin)
class ProjectListPlugin(CMSPluginBase):
    """List projects, optionally narrowed by theme and/or project type."""

    model = ProjectList
    module = "OKF"
    name = _("Project List")
    render_template = "organisation/project_list_plugin.html"

    def render(self, context, instance, placeholder):
        ctx = super().render(context, instance, placeholder)
        # Apply each configured filter in turn; chained .filter() calls are
        # kept (not merged) to preserve the original queryset semantics.
        projects = Project.objects.all()
        theme = instance.theme
        if theme:
            projects = projects.filter(themes=theme)
        project_type = instance.project_type
        if project_type:
            projects = projects.filter(types=project_type)
        ctx['projects'] = projects
        return ctx
plugin_pool.register_plugin(ProjectListPlugin)
class ThemesPlugin(CMSPluginBase):
    """Render the full list of themes."""

    model = CMSPlugin
    module = "OKF"
    name = _("Theme list")
    render_template = "organisation/theme_list.html"

    def render(self, context, instance, placeholder):
        ctx = super().render(context, instance, placeholder)
        ctx['object_header'] = _("Themes")
        ctx['object_list'] = Theme.objects.all()
        return ctx
plugin_pool.register_plugin(ThemesPlugin)
class NetworkGroupFlagsPlugin(CMSPluginBase):
    """Render country flags for every network group of the configured type."""

    model = NetworkGroupList
    module = "OKF"
    name = _("Network Group Flags")
    render_template = "organisation/networkgroup_flags.html"
    text_enabled = True

    def icon_alt(self, instance):
        # Bug fix: NetworkGroupList has no ``theme`` attribute — the old
        # ``instance.theme.name`` was copy-pasted from FeaturedThemePlugin
        # and raised AttributeError in the text editor. Describe the
        # configured group type instead (same accessor render() uses).
        return 'Network Group Flags: %s' % instance.get_group_type_display()

    def icon_src(self, instance):
        # Generic snippet icon shipped with django CMS.
        return settings.STATIC_URL + "cms/img/icons/plugins/snippet.png"

    def render(self, context, instance, placeholder):
        context = super().render(context, instance, placeholder)
        context['title'] = instance.get_group_type_display()
        # All country-level groups of the selected type, alphabetical.
        context['countries'] = NetworkGroup.objects.countries().filter(
            group_type=instance.group_type).order_by('name')
        return context
plugin_pool.register_plugin(NetworkGroupFlagsPlugin)
class WorkingGroupPlugin(CMSPluginBase):
    """Render a short list of the active working groups."""

    model = CMSPlugin
    module = "OKF"
    name = _("Working Groups")
    render_template = "organisation/workinggroup_shortlist.html"
    text_enabled = True

    def icon_alt(self, instance):
        return 'Working Groups'

    def icon_src(self, instance):
        # Generic snippet icon shipped with django CMS.
        icon_path = "cms/img/icons/plugins/snippet.png"
        return settings.STATIC_URL + icon_path

    def render(self, context, instance, placeholder):
        ctx = super().render(context, instance, placeholder)
        ctx['workinggroups'] = WorkingGroup.objects.active()
        return ctx
plugin_pool.register_plugin(WorkingGroupPlugin)
extension_pool.register(SideBarExtension)
| mit | 1d515aa6b234746ed03040b0c29ad93a | 28.292857 | 79 | 0.691782 | 4.101 | false | false | false | false |
okfn/website | foundation/organisation/migrations/0001_initial.py | 2 | 15921 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0013_urlconfrevision'),
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('slug', models.SlugField(unique=True, max_length=100)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='BoardMembership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(max_length=100)),
('board', models.ForeignKey(to='organisation.Board', on_delete=models.CASCADE)),
],
),
migrations.CreateModel(
name='FeaturedProject',
fields=[
('cmsplugin_ptr', models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to='cms.CMSPlugin',
on_delete=models.CASCADE,
)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='FeaturedTheme',
fields=[
('cmsplugin_ptr', models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to='cms.CMSPlugin',
on_delete=models.CASCADE,
)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='NetworkGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('group_type', models.IntegerField(default=0, choices=[(0, b'Local group'), (1, b'Chapter')])),
('description', models.TextField(null=True, blank=True)),
('country', django_countries.fields.CountryField(max_length=2)),
('country_slug', models.SlugField()),
('region', models.CharField(max_length=100, blank=True)),
('region_slug', models.SlugField(default=None)),
('mailinglist_url', models.URLField(blank=True)),
('homepage_url', models.URLField(blank=True)),
('twitter', models.CharField(max_length=18, blank=True)),
('facebook_url', models.URLField(blank=True)),
('youtube_url', models.URLField(blank=True)),
('gplus_url', models.URLField(verbose_name=b'Google+ url', blank=True)),
('wiki_url', models.URLField(blank=True)),
('position', models.CharField(max_length=42, null=True, blank=True)),
('extra_information', models.TextField(null=True, blank=True)),
],
options={
'ordering': ('country', 'region'),
},
),
migrations.CreateModel(
name='NetworkGroupList',
fields=[
('cmsplugin_ptr', models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to='cms.CMSPlugin',
on_delete=models.CASCADE,
)),
('group_type', models.IntegerField(default=0, choices=[(0, b'Local group'), (1, b'Chapter')])),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='NetworkGroupMembership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(max_length=100, blank=True)),
('order', models.IntegerField(help_text=b'Higher numbers mean higher up in the food chain', null=True, blank=True)),
('networkgroup', models.ForeignKey(to='organisation.NetworkGroup', on_delete=models.CASCADE)),
],
options={
'ordering': ['-order', 'person__name'],
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('description', models.TextField(null=True, blank=True)),
('email', models.EmailField(max_length=254, blank=True)),
('photo', models.ImageField(upload_to=b'organisation/people/photos', blank=True)),
('twitter', models.CharField(max_length=18, blank=True)),
('url', models.URLField(blank=True)),
],
options={
'ordering': ['name'],
'verbose_name_plural': 'people',
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('slug', models.SlugField(unique=True, max_length=100)),
('teaser', models.CharField(help_text=b'A single line description for list views', max_length=100)),
('description', models.TextField()),
('banner', models.ImageField(help_text=b'A banner used for featuring this project on the front page', upload_to=b'projects/banners', blank=True)),
('picture', models.ImageField(help_text=b'A simple logo or picture to represent this project', upload_to=b'projects/pictures', blank=True)),
('twitter', models.CharField(max_length=18, blank=True)),
('homepage_url', models.URLField(blank=True)),
('sourcecode_url', models.URLField(blank=True)),
('mailinglist_url', models.URLField(blank=True)),
('featured', models.BooleanField(default=False, help_text=b'Should this be a featured project?')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='ProjectList',
fields=[
('cmsplugin_ptr', models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to='cms.CMSPlugin',
on_delete=models.CASCADE
)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='ProjectType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('slug', models.SlugField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
name='SignupForm',
fields=[
('cmsplugin_ptr', models.OneToOneField(
parent_link=True,
auto_created=True,
primary_key=True,
serialize=False,
to='cms.CMSPlugin',
on_delete=models.CASCADE,
)),
('title', models.CharField(default=b'Get Connected to Open Knowledge', max_length=50)),
('description', models.TextField(blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='Theme',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('slug', models.SlugField(unique=True, max_length=100)),
('blurb', models.TextField(help_text=b'Blurb for theme page')),
('description', models.TextField()),
('picture', models.ImageField(help_text=b'A simple logo or picture to represent this theme', upload_to=b'themes/pictures', blank=True)),
],
),
migrations.CreateModel(
name='Unit',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('order', models.IntegerField(help_text=b'Higher numbers mean higher up in the food chain', null=True, blank=True)),
],
options={
'ordering': ['-order', 'name'],
},
),
migrations.CreateModel(
name='UnitMembership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(max_length=100)),
('order', models.IntegerField(help_text=b'Higher numbers mean higher up in the food chain', null=True, blank=True)),
('person', models.ForeignKey(to='organisation.Person', on_delete=models.CASCADE)),
('unit', models.ForeignKey(to='organisation.Unit', on_delete=models.CASCADE)),
],
options={
'ordering': ['-order', 'person__name'],
},
),
migrations.CreateModel(
name='WorkingGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100)),
('slug', models.SlugField(unique=True, max_length=100)),
('description', models.TextField()),
('homepage_url', models.URLField(blank=True)),
('logo', models.ImageField(upload_to=b'organisation/working-groups/logos', blank=True)),
('incubation', models.BooleanField(default=True, help_text=b'Is this group in incubation?')),
('themes', models.ManyToManyField(related_name='workinggroups', to='organisation.Theme', blank=True)),
],
options={
'ordering': ('name',),
},
),
migrations.AddField(
model_name='unit',
name='members',
field=models.ManyToManyField(to='organisation.Person', through='organisation.UnitMembership'),
),
migrations.AddField(
model_name='projectlist',
name='project_type',
field=models.ForeignKey(
blank=True,
to='organisation.ProjectType',
help_text=b'Limit to projects with this type',
null=True,
on_delete=models.CASCADE,
),
),
migrations.AddField(
model_name='projectlist',
name='theme',
field=models.ForeignKey(
blank=True,
to='organisation.Theme',
help_text=b'Limit to projects with this theme',
null=True,
on_delete=models.CASCADE,
),
),
migrations.AddField(
model_name='project',
name='themes',
field=models.ManyToManyField(to='organisation.Theme', blank=True),
),
migrations.AddField(
model_name='project',
name='types',
field=models.ManyToManyField(to='organisation.ProjectType', blank=True),
),
migrations.AddField(
model_name='networkgroupmembership',
name='person',
field=models.ForeignKey(to='organisation.Person', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='networkgroup',
name='members',
field=models.ManyToManyField(to='organisation.Person', through='organisation.NetworkGroupMembership'),
),
migrations.AddField(
model_name='networkgroup',
name='working_groups',
field=models.ManyToManyField(to='organisation.WorkingGroup', blank=True),
),
migrations.AddField(
model_name='featuredtheme',
name='theme',
field=models.ForeignKey(
related_name='+',
to='organisation.Theme',
on_delete=models.CASCADE,
),
),
migrations.AddField(
model_name='featuredproject',
name='project',
field=models.ForeignKey(
related_name='+',
to='organisation.Project',
on_delete=models.CASCADE,
),
),
migrations.AddField(
model_name='boardmembership',
name='person',
field=models.ForeignKey(to='organisation.Person', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='board',
name='members',
field=models.ManyToManyField(to='organisation.Person', through='organisation.BoardMembership'),
),
migrations.AlterUniqueTogether(
name='networkgroup',
unique_together=set([('country', 'region')]),
),
]
| mit | 2c8f62b4602851bb84b0a0d3e819e498 | 43.472067 | 162 | 0.51743 | 4.715936 | false | false | false | false |
stripe/stripe-python | stripe/__init__.py | 1 | 1399 | from __future__ import absolute_import, division, print_function
import os
# Stripe Python bindings
# API docs at http://stripe.com/docs/api
# Authors:
# Patrick Collison <patrick@stripe.com>
# Greg Brockman <gdb@stripe.com>
# Andrew Metcalf <andrew@stripe.com>
# Configuration variables
# Module-level configuration. These values are read by the rest of the
# library and may be overridden by the caller after ``import stripe``.
api_key = None
client_id = None
api_base = "https://api.stripe.com"  # main REST API host
connect_api_base = "https://connect.stripe.com"  # Connect/OAuth host
upload_api_base = "https://files.stripe.com"  # file-upload host
api_version = None
verify_ssl_certs = True
proxy = None
default_http_client = None
app_info = None  # populated via set_app_info() below
enable_telemetry = True
max_network_retries = 0
# Bundled CA certificates shipped alongside this package, used when
# verifying TLS connections.
ca_bundle_path = os.path.join(
    os.path.dirname(__file__), "data", "ca-certificates.crt"
)
# Set to either 'debug' or 'info', controls console logging
log = None
# API resources
from stripe.api_resources import * # noqa
# OAuth
from stripe.oauth import OAuth # noqa
# Webhooks
from stripe.webhook import Webhook, WebhookSignature # noqa
# Sets some basic information about the running application that's sent along
# with API requests. Useful for plugin authors to identify their plugin when
# communicating with Stripe.
#
# Takes a name and optional version and plugin URL.
def set_app_info(name, partner_id=None, url=None, version=None):
    """Record identifying details about the running application/plugin.

    The stored values are sent along with API requests so the traffic
    can be attributed to the plugin. Only ``name`` is required; the
    partner id, URL and version are optional.
    """
    global app_info
    app_info = dict(
        name=name,
        partner_id=partner_id,
        url=url,
        version=version,
    )
| mit | c660449c31e0b71f68effb996becba2b | 24.436364 | 77 | 0.710508 | 3.299528 | false | false | false | false |
stripe/stripe-python | stripe/api_resources/payment_method.py | 1 | 2770 | # -*- coding: utf-8 -*-
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
class PaymentMethod(
    CreateableAPIResource,
    ListableAPIResource,
    UpdateableAPIResource,
):
    """
    PaymentMethod objects represent your customer's payment instruments.
    You can use them with [PaymentIntents](https://stripe.com/docs/payments/payment-intents) to collect payments or save them to
    Customer objects to store instrument details for future payments.
    Related guides: [Payment Methods](https://stripe.com/docs/payments/payment-methods) and [More Payment Scenarios](https://stripe.com/docs/payments/more-payment-scenarios).
    """
    OBJECT_NAME = "payment_method"
    @classmethod
    def _cls_attach(
        cls,
        payment_method,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        """Class-level variant of ``attach``: POST /v1/payment_methods/{id}/attach."""
        return cls._static_request(
            "post",
            "/v1/payment_methods/{payment_method}/attach".format(
                payment_method=util.sanitize_id(payment_method)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    # class_method_variant dispatches to _cls_attach when called on the
    # class, and to this body when called on an instance.
    @util.class_method_variant("_cls_attach")
    def attach(self, idempotency_key=None, **params):
        """Instance variant: POST /v1/payment_methods/{self.id}/attach."""
        return self._request(
            "post",
            "/v1/payment_methods/{payment_method}/attach".format(
                payment_method=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
    @classmethod
    def _cls_detach(
        cls,
        payment_method,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        """Class-level variant of ``detach``: POST /v1/payment_methods/{id}/detach."""
        return cls._static_request(
            "post",
            "/v1/payment_methods/{payment_method}/detach".format(
                payment_method=util.sanitize_id(payment_method)
            ),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    @util.class_method_variant("_cls_detach")
    def detach(self, idempotency_key=None, **params):
        """Instance variant: POST /v1/payment_methods/{self.id}/detach."""
        return self._request(
            "post",
            "/v1/payment_methods/{payment_method}/detach".format(
                payment_method=util.sanitize_id(self.get("id"))
            ),
            idempotency_key=idempotency_key,
            params=params,
        )
| mit | 09ebd77ffefd2ea09b70115f89ec9f34 | 31.209302 | 174 | 0.610469 | 4.067548 | false | false | false | false |
stripe/stripe-python | stripe/api_resources/customer_balance_transaction.py | 1 | 1581 | # -*- coding: utf-8 -*-
# File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import APIResource
from stripe.api_resources.customer import Customer
from stripe.six.moves.urllib.parse import quote_plus
class CustomerBalanceTransaction(APIResource):
    """A debit or credit applied to a customer's running balance.

    Each customer has a [`balance`](https://stripe.com/docs/api/customers/object#customer_object-balance)
    that is applied to their next invoice upon finalization; these
    transactions increment or decrement it by a given ``amount``. The
    resource only exists nested under a Customer, so it cannot be
    retrieved on its own (see ``retrieve`` below).
    """
    OBJECT_NAME = "customer_balance_transaction"

    def instance_url(self):
        """Return the REST URL for this transaction, nested under its customer."""
        transaction_id = quote_plus(util.utf8(self.id))
        customer_id = quote_plus(util.utf8(self.customer))
        return "%s/%s/balance_transactions/%s" % (
            Customer.class_url(),
            customer_id,
            transaction_id,
        )

    @classmethod
    def retrieve(cls, id, api_key=None, **params):
        """Always raises: retrieval requires going through the parent Customer."""
        raise NotImplementedError(
            "Can't retrieve a Customer Balance Transaction without a Customer ID. "
            "Use Customer.retrieve_customer_balance_transaction('cus_123', 'cbtxn_123')"
        )
| mit | 022b7f3cc363c4d1f03b389a5214b6db | 42.916667 | 133 | 0.710942 | 3.923077 | false | false | false | false |
stripe/stripe-python | tests/api_resources/abstract/test_test_helpers_api_resource.py | 1 | 2351 | from __future__ import absolute_import, division, print_function
import stripe
import pytest
from stripe import util
from stripe.api_resources.abstract import APIResourceTestHelpers
class TestTestHelperAPIResource(object):
    """Tests for the ``test_helpers`` decorator and helper custom methods."""
    # Minimal resource wired with the ``test_helpers`` decorator so the
    # generated ``.test_helpers`` accessor and its custom method can be
    # exercised end to end.
    @stripe.api_resources.abstract.test_helpers
    class MyTestHelpersResource(stripe.api_resources.abstract.APIResource):
        OBJECT_NAME = "myresource"
        @stripe.api_resources.abstract.custom_method(
            "do_stuff", http_verb="post", http_path="do_the_thing"
        )
        class TestHelpers(APIResourceTestHelpers):
            def __init__(self, resource):
                self.resource = resource
            def do_stuff(self, idempotency_key=None, **params):
                # POST to the helper path, fold the response back into the
                # wrapped resource, then return that resource.
                url = self.instance_url() + "/do_the_thing"
                headers = util.populate_headers(idempotency_key)
                self.resource.refresh_from(
                    self.resource.request("post", url, params, headers)
                )
                return self.resource
    def test_call_custom_method_class(self, request_mock):
        # Calling the custom method on the TestHelpers class itself should
        # hit the /v1/test_helpers/... URL with the given params.
        request_mock.stub_request(
            "post",
            "/v1/test_helpers/myresources/mid/do_the_thing",
            {"id": "mid", "thing_done": True},
            rheaders={"request-id": "req_id"},
        )
        obj = self.MyTestHelpersResource.TestHelpers.do_stuff("mid", foo="bar")
        request_mock.assert_requested(
            "post",
            "/v1/test_helpers/myresources/mid/do_the_thing",
            {"foo": "bar"},
        )
        assert obj.thing_done is True
    def test_call_custom_method_instance_via_property(self, request_mock):
        # Same call, but reached through the generated ``.test_helpers``
        # property on a constructed resource instance.
        request_mock.stub_request(
            "post",
            "/v1/test_helpers/myresources/mid/do_the_thing",
            {"id": "mid", "thing_done": True},
            rheaders={"request-id": "req_id"},
        )
        obj = self.MyTestHelpersResource.construct_from({"id": "mid"}, "mykey")
        obj.test_helpers.do_stuff(foo="bar")
        request_mock.assert_requested(
            "post",
            "/v1/test_helpers/myresources/mid/do_the_thing",
            {"foo": "bar"},
        )
        assert obj.thing_done is True
    def test_helper_decorator_raises_for_non_resource(self):
        # Decorating something that is not an APIResource subclass must fail.
        with pytest.raises(ValueError):
            stripe.api_resources.abstract.test_helpers(str)
| mit | 6af9455e9ad9165c6be8889d4f7ed385 | 34.621212 | 79 | 0.592088 | 3.810373 | false | true | false | false |
okfn/website | foundation/press/cms_menus.py | 2 | 1126 | # flake8: noqa
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
from django.utils.translation import ugettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from .models import PressRelease, PressMention
class PressReleaseMenu(CMSAttachMenu):
    """Attachable CMS menu listing every press release."""
    name = _("Press Releases")

    def get_nodes(self, request):
        """Build one navigation node per press release."""
        return [
            NavigationNode(
                release.title,
                release.get_absolute_url(),
                release.pk,
            )
            for release in PressRelease.objects.all()
        ]


menu_pool.register_menu(PressReleaseMenu)
# class PressMentionMenu(CMSAttachMenu):
# name = _("Press Mentions")
# def get_nodes(self, request):
# nodes = []
# for mention in PressMention.objects.all():
# node = NavigationNode(
# mention.title,
# mention.get_absolute_url(),
# mention.pk,
# )
# nodes.append(node)
# return nodes
# menu_pool.register_menu(PressMentionMenu)
| mit | 94d59110598b1a3ffb8a4190b5523c17 | 24.590909 | 55 | 0.58881 | 3.978799 | false | false | false | false |
stripe/stripe-python | stripe/api_resources/list_object.py | 1 | 4503 | from __future__ import absolute_import, division, print_function
from stripe import six, util
from stripe.stripe_object import StripeObject
from stripe.six.moves.urllib.parse import quote_plus
class ListObject(StripeObject):
    """One page of API list results plus helpers for fetching more pages."""
    OBJECT_NAME = "list"
    def list(
        self, api_key=None, stripe_version=None, stripe_account=None, **params
    ):
        """Issue a GET against this list's own URL with ``params`` as filters."""
        return self._request(
            "get",
            self.get("url"),
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    def create(
        self,
        api_key=None,
        idempotency_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        """POST to this list's URL to create a new member resource."""
        return self._request(
            "post",
            self.get("url"),
            api_key=api_key,
            idempotency_key=idempotency_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    def retrieve(
        self,
        id,
        api_key=None,
        stripe_version=None,
        stripe_account=None,
        **params
    ):
        """GET a single member of this list by its ``id``."""
        url = "%s/%s" % (self.get("url"), quote_plus(util.utf8(id)))
        return self._request(
            "get",
            url,
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            params=params,
        )
    def __getitem__(self, k):
        # Only string keys address fields of the list object itself;
        # integer indexing is redirected (via the error hint) to ``.data``.
        if isinstance(k, six.string_types):
            return super(ListObject, self).__getitem__(k)
        else:
            raise KeyError(
                "You tried to access the %s index, but ListObject types only "
                "support string keys. (HINT: List calls return an object with "
                "a 'data' (which is the data array). You likely want to call "
                ".data[%s])" % (repr(k), repr(k))
            )
    def __iter__(self):
        # Iteration, length and reversal all delegate to ``data``
        # (defaulting to an empty list when the attribute is absent).
        return getattr(self, "data", []).__iter__()
    def __len__(self):
        return getattr(self, "data", []).__len__()
    def __reversed__(self):
        return getattr(self, "data", []).__reversed__()
    def auto_paging_iter(self):
        """Yield every item across all pages, fetching pages lazily."""
        page = self
        while True:
            if (
                "ending_before" in self._retrieve_params
                and "starting_after" not in self._retrieve_params
            ):
                # Backwards pagination: emit each page's items in reverse,
                # then fetch the page before it.
                for item in reversed(page):
                    yield item
                page = page.previous_page()
            else:
                for item in page:
                    yield item
                page = page.next_page()
            if page.is_empty:
                break
    @classmethod
    def empty_list(
        cls, api_key=None, stripe_version=None, stripe_account=None
    ):
        """Build a ListObject with no data; used to terminate pagination."""
        return cls.construct_from(
            {"data": []},
            key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            last_response=None,
        )
    @property
    def is_empty(self):
        # True when ``data`` is missing or empty.
        return not self.data
    def next_page(
        self, api_key=None, stripe_version=None, stripe_account=None, **params
    ):
        """Fetch the page after this one (``starting_after`` the last item's id)."""
        if not self.has_more:
            return self.empty_list(
                api_key=api_key,
                stripe_version=stripe_version,
                stripe_account=stripe_account,
            )
        last_id = self.data[-1].id
        # Reuse the filters that produced this page, overriding the cursor
        # and then applying any caller-supplied params on top.
        params_with_filters = self._retrieve_params.copy()
        params_with_filters.update({"starting_after": last_id})
        params_with_filters.update(params)
        return self.list(
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            **params_with_filters
        )
    def previous_page(
        self, api_key=None, stripe_version=None, stripe_account=None, **params
    ):
        """Fetch the page before this one (``ending_before`` the first item's id)."""
        if not self.has_more:
            return self.empty_list(
                api_key=api_key,
                stripe_version=stripe_version,
                stripe_account=stripe_account,
            )
        first_id = self.data[0].id
        params_with_filters = self._retrieve_params.copy()
        params_with_filters.update({"ending_before": first_id})
        params_with_filters.update(params)
        return self.list(
            api_key=api_key,
            stripe_version=stripe_version,
            stripe_account=stripe_account,
            **params_with_filters
        )
| mit | 2963b7c707f31414549d83d9d1844799 | 27.320755 | 79 | 0.522096 | 4.053105 | false | false | false | false |
python-provy/provy | provy/more/centos/package/pip.py | 1 | 12688 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Roles in this namespace are meant to provision packages installed via the `PIP <http://www.pip-installer.org/>`_ package manager for CentOS distributions.
'''
import xmlrpclib
from fabric.api import settings
from provy.core import Role
from provy.more.centos.package.yum import YumRole
class PipRole(Role):
'''
This role provides package management operations with `PIP <http://www.pip-installer.org/>`_ within CentOS distributions.
Example:
::
from provy.core import Role
from provy.more.centos import PipRole
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
role.ensure_package_installed('django', version='1.1.1')
'''
use_sudo = True
user = None
def provision(self):
'''
Installs pip dependencies. This method should be called upon if overriden in base classes, or PIP won't work properly in the remote server.
Example:
::
class MySampleRole(Role):
def provision(self):
self.provision_role(PipRole) # does not need to be called if using with block.
'''
with self.using(YumRole) as role:
role.ensure_up_to_date()
role.ensure_package_installed('python-setuptools')
role.ensure_package_installed('python-devel')
role.ensure_package_installed('gcc')
self.execute("easy_install pip", sudo=True, stdout=False, user=None)
def extract_package_data_from_input(self, input_line):
package_constraint = None
input_line = input_line.strip()
package_info = {
"name": input_line
}
if input_line.startswith("-e") and "#egg=" in input_line:
data = input_line.split("#egg=")
package_info["name"] = data[1]
elif "==" in input_line:
package_constraint = "=="
elif '>=' in input_line:
package_constraint = ">="
if package_constraint:
package_info['version_constraint'] = package_constraint
data = input_line.split(package_constraint)
package_info["name"] = data[0]
package_info["version"] = data[1]
return package_info
def is_package_installed(self, package_name, version=None):
'''
Returns :data:`True` if the given package is installed via pip in the remote server, :data:`False` otherwise.
:param package_name: Name of the package to verify
:type package_name: :class:`str`
:param version: Version to check for. Defaults to :data:`None`, which makes it check for any version.
:type version: :class:`str`
:return: Whether the package is installed or not.
:rtype: :class:`bool`
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
if role.is_package_installed('django', version='1.1.1'):
pass
'''
with settings(warn_only=True):
package_info = self.extract_package_data_from_input(package_name)
if not version:
package_name = package_info['name']
package_string = self.execute("pip freeze | tr '[A-Z]' '[a-z]' | grep %s" % package_name, stdout=False, sudo=self.use_sudo, user=self.user)
if package_name in package_string:
installed_version = package_string.split('==')[-1]
if 'version' in package_info:
if '>=' == package_info['version_constraint']:
if installed_version < package_info['version']:
return False
elif version and installed_version != version:
return False
return True
def get_package_remote_version(self, package_name):
'''
Returns the version of the package currently installed via PIP in the remote server. If package is not installed, returns :data:`None`.
:param package_name: Name of the package to verify
:type package_name: :class:`str`
:return: The package version.
:rtype: :class:`str`
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
version = role.get_package_remote_version('django')
if version and version == '1.1.1':
pass
'''
with settings(warn_only=True):
result = self.execute("pip freeze | tr '[A-Z]' '[a-z]' | grep %s" % package_name.lower(), stdout=False, sudo=self.use_sudo, user=self.user)
if result:
package, version = result.split('==')
return version
return None
def get_package_latest_version(self, package_name):
'''
Returns the latest available version of the package at the Python Package Index. If package is not available, returns :data:`None`.
:param package_name: Name of the package to verify
:type package_name: :class:`str`
:return: The package version.
:rtype: :class:`str`
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
version = role.get_package_remote_version('django')
latest = role.get_package_latest_version('django')
if version != latest:
pass
# this check is not needed if you use ensure_package_up_to_date.
'''
pypi = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
available = pypi.package_releases(package_name)
if not available:
# Try to capitalize pkg name
available = pypi.package_releases(package_name.capitalize())
if not available:
return None
return available[0]
def package_can_be_updated(self, package_name):
'''
Returns :data:`True` if there is an update for the given package in the Python Package Index, False otherwise.
:param package_name: Name of the package to verify
:type package_name: :class:`str`
:return: Whether the package can be updated.
:rtype: :class:`bool`
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
if role.package_can_be_updated('django'):
pass
# this check is not needed if you use ensure_package_up_to_date.
'''
remote_version = self.get_package_remote_version(package_name)
latest_version = self.get_package_latest_version(package_name)
return remote_version != latest_version
def ensure_package_installed(self, package_name, version=None):
'''
Makes sure the package is installed with the specified version (latest if :data:`None` specified).
This method does not verify and upgrade the package on subsequent provisions, though. Use :meth:`ensure_package_up_to_date` for this purpose instead.
:param package_name: Name of the package to install.
:type package_name: :class:`str`
:param version: If specified, installs this version of the package. Installs latest version otherwise. You can use >= or <= before version number to ensure package version.
:type version: :class:`str`
:return: Whether the package had to be installed or not.
:rtype: :class:`bool`
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
role.ensure_package_installed('django', version='1.1.1')
'''
if version:
package_info = self.extract_package_data_from_input(version)
version_constraint = package_info.get('version_constraint', '==')
version = package_info.get('version', version)
if not self.is_package_installed(package_name, version):
self.log('%s version %s should be installed (via pip)! Rectifying that...' % (package_name, version))
self.execute('pip install %s%s%s' % (package_name, version_constraint, version), stdout=False, sudo=self.use_sudo, user=self.user)
self.log('%s version %s installed!' % (package_name, version))
return True
elif not self.is_package_installed(package_name):
self.log('%s is not installed (via pip)! Installing...' % package_name)
self.execute('pip install %s' % package_name, stdout=False, sudo=self.use_sudo, user=self.user)
self.log('%s installed!' % package_name)
return True
return False
def ensure_requirements_installed(self, requirements_file_name):
'''
Makes sure the requirements file provided is installed.
:param requirements_file_name: Path to the requirements file (can be provided as absolute path or relative to the directory where provy is run from).
:type requirements_file_name: :class:`str`
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
role.ensure_requirements_installed('/path/to/requirements.txt')
'''
with open(requirements_file_name, 'r') as requirements_file:
for requirement in requirements_file.readlines():
self.ensure_package_installed(requirement.strip())
def ensure_package_up_to_date(self, package_name):
'''
Makes sure the package is installed and up-to-date with the latest version.
This method verifies if there is a newer version for this package every time the server is provisioned. If a new version is found, it is installed.
:param package_name: Name of the package to install.
:type package_name: :class:`str`
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
role.ensure_package_is_up_to_date('django')
'''
is_installed = self.is_package_installed(package_name)
if is_installed and self.package_can_be_updated(package_name):
self.log('%s is installed (via pip)! Updating...' % package_name)
self.execute('pip install -U --no-dependencies %s' % package_name, stdout=False, sudo=self.use_sudo, user=self.user)
self.log('%s updated!' % package_name)
return True
elif not is_installed:
self.ensure_package_installed(package_name)
return True
self.log('%s is up to date (via pip).' % package_name)
return False
def set_user(self, user):
'''
Prepares the pip role instance to run its commands as a specific user.
:param user: The username with which the role should run its commands.
:type user: :class:`str`
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
role.ensure_package_installed('django') # runs as sudo
role.set_user('johndoe')
role.ensure_package_installed('django') # runs as "johndoe" user
'''
self.user = user
self.use_sudo = False
def set_sudo(self):
'''
Prepares the pip role instance to run its commands with sudo; This is useful when you had previously set a user, and want it to run back as sudo.
Example:
::
class MySampleRole(Role):
def provision(self):
with self.using(PipRole) as role:
role.ensure_package_installed('django') # runs as sudo
role.set_user('johndoe')
role.ensure_package_installed('django') # runs as "johndoe" user
role.set_sudo()
role.ensure_package_installed('django') # runs as sudo
'''
self.user = None
self.use_sudo = True
| mit | 90829b62b186dca94ed739314f8c3b7f | 38.52648 | 180 | 0.574874 | 4.419366 | false | false | false | false |
drj11/pypng | code/pngsuite.py | 1 | 36500 | #!/usr/bin/env python
# pngsuite.py
# PngSuite Test PNGs.
# https://docs.python.org/3.2/library/argparse.html
import argparse
import sys
"""
After you import this module with "import pngsuite" use
``pngsuite.basi0g01`` to get the bytes for a particular PNG image, or
use ``pngsuite.png`` to get a dict() of them all.
Also a delicious command line tool.
"""
def _dehex(s):
"""Liberally convert from hex string to binary string."""
import binascii
return binascii.unhexlify(s.replace(b"\n", b""))
# Copies of PngSuite test files taken
# from http://www.schaik.com/pngsuite/pngsuite_bas_png.html
# on 2009-02-19 by drj and converted to hex.
# Some of these are not actually in PngSuite (but maybe they should
# be?), they use the same naming scheme, but start with a capital
# letter.
png = {
'basi0g01': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002001000000012c0677
cf0000000467414d41000186a031e8965f0000009049444154789c2d8d310ec2
300c45dfc682c415187a00a42e197ab81e83b127e00c5639001363a580d8582c
65c910357c4b78b0bfbfdf4f70168c19e7acb970a3f2d1ded9695ce5bf5963df
d92aaf4c9fd927ea449e6487df5b9c36e799b91bdf082b4d4bd4014fe4014b01
ab7a17aee694d28d328a2d63837a70451e1648702d9a9ff4a11d2f7a51aa21e5
a18c7ffd0094e3511d661822f20000000049454e44ae426082
"""),
'basi0g02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002002000000016ba60d
1f0000000467414d41000186a031e8965f0000005149444154789c635062e860
00e17286bb609c93c370ec189494960631366e4467b3ae675dcf10f521ea0303
90c1ca006444e11643482064114a4852c710baea3f18c31918020c30410403a6
0ac1a09239009c52804d85b6d97d0000000049454e44ae426082
"""),
'basi0g04': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200400000001e4e6f8
bf0000000467414d41000186a031e8965f000000ae49444154789c658e5111c2
301044171c141c141c041c843a287510ea20d441c041c141c141c04191102454
03994998cecd7edcecedbb9bdbc3b2c2b6457545fbc4bac1be437347f7c66a77
3c23d60db15e88f5c5627338a5416c2e691a9b475a89cd27eda12895ae8dfdab
43d61e590764f5c83a226b40d669bec307f93247701687723abf31ff83a2284b
a5b4ae6b63ac6520ad730ca4ed7b06d20e030369bd6720ed383290360406d24e
13811f2781eba9d34d07160000000049454e44ae426082
"""),
'basi0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000001211615
be0000000467414d41000186a031e8965f000000b549444154789cb5905d0ac2
3010849dbac81c42c47bf843cf253e8878b0aa17110f214bdca6be240f5d21a5
94ced3e49bcd322c1624115515154998aa424822a82a5624a1aa8a8b24c58f99
999908130989a04a00d76c2c09e76cf21adcb209393a6553577da17140a2c59e
70ecbfa388dff1f03b82fb82bd07f05f7cb13f80bb07ad2fd60c011c3c588eef
f1f4e03bbec7ce832dca927aea005e431b625796345307b019c845e6bfc3bb98
769d84f9efb02ea6c00f9bb9ff45e81f9f280000000049454e44ae426082
"""),
'basi0g16': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002010000000017186c9
fd0000000467414d41000186a031e8965f000000e249444154789cb5913b0ec2
301044c7490aa8f85d81c3e4301c8f53a4ca0da8902c8144b3920b4043111282
23bc4956681a6bf5fc3c5a3ba0448912d91a4de2c38dd8e380231eede4c4f7a1
4677700bec7bd9b1d344689315a3418d1a6efbe5b8305ba01f8ff4808c063e26
c60d5c81edcf6c58c535e252839e93801b15c0a70d810ae0d306b205dc32b187
272b64057e4720ff0502154034831520154034c3df81400510cdf0015c86e5cc
5c79c639fddba9dcb5456b51d7980eb52d8e7d7fa620a75120d6064641a05120
b606771a05626b401a05f1f589827cf0fe44c1f0bae0055698ee8914fffffe00
00000049454e44ae426082
"""),
'basi2c08': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002008020000018b1fdd
350000000467414d41000186a031e8965f000000f249444154789cd59341aa04
210c44abc07b78133d59d37333bd89d76868b566d10cf4675af8596431a11662
7c5688919280e312257dd6a0a4cf1a01008ee312a5f3c69c37e6fcc3f47e6776
a07f8bdaf5b40feed2d33e025e2ff4fe2d4a63e1a16d91180b736d8bc45854c5
6d951863f4a7e0b66dcf09a900f3ffa2948d4091e53ca86c048a64390f662b50
4a999660ced906182b9a01a8be00a56404a6ede182b1223b4025e32c4de34304
63457680c93aada6c99b73865aab2fc094920d901a203f5ddfe1970d28456783
26cffbafeffcd30654f46d119be4793f827387fc0d189d5bc4d69a3c23d45a7f
db803146578337df4d0a3121fc3d330000000049454e44ae426082
"""),
'basi2c16': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000201002000001db8f01
760000000467414d41000186a031e8965f0000020a49444154789cd5962173e3
3010853fcf1838cc61a1818185a53e56787fa13fa130852e3b5878b4b0b03081
b97f7030070b53e6b057a0a8912bbb9163b9f109ececbc59bd7dcf2b45492409
d66f00eb1dd83cb5497d65456aeb8e1040913b3b2c04504c936dd5a9c7e2c6eb
b1b8f17a58e8d043da56f06f0f9f62e5217b6ba3a1b76f6c9e99e8696a2a72e2
c4fb1e4d452e92ec9652b807486d12b6669be00db38d9114b0c1961e375461a5
5f76682a85c367ad6f682ff53a9c2a353191764b78bb07d8ddc3c97c1950f391
6745c7b9852c73c2f212605a466a502705c8338069c8b9e84efab941eb393a97
d4c9fd63148314209f1c1d3434e847ead6380de291d6f26a25c1ebb5047f5f24
d85c49f0f22cc1d34282c72709cab90477bf25b89d49f0f351822297e0ea9704
f34c82bc94002448ede51866e5656aef5d7c6a385cb4d80e6a538ceba04e6df2
480e9aa84ddedb413bb5c97b3838456df2d4fec2c7a706983e7474d085fae820
a841776a83073838973ac0413fea2f1dc4a06e71108fda73109bdae48954ad60
bf867aac3ce44c7c1589a711cf8a81df9b219679d96d1cec3d8bbbeaa2012626
df8c7802eda201b2d2e0239b409868171fc104ba8b76f10b4da09f6817ffc609
c413ede267fd1fbab46880c90f80eccf0013185eb48b47ba03df2bdaadef3181
cb8976f18e13188768170f98c0f844bb78cb04c62ddac59d09fc3fa25dfc1da4
14deb3df1344f70000000049454e44ae426082
"""),
'basi3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000133a3ba
500000000467414d41000186a031e8965f00000300504c5445224400f5ffed77
ff77cbffff110a003a77002222ffff11ff110000222200ffac5566ff66ff6666
ff01ff221200dcffffccff994444ff005555220000cbcbff44440055ff55cbcb
00331a00ffecdcedffffe4ffcbffdcdc44ff446666ff330000442200ededff66
6600ffa444ffffaaeded0000cbcbfefffffdfffeffff0133ff33552a000101ff
8888ff00aaaa010100440000888800ffe4cbba5b0022ff22663200ffff99aaaa
ff550000aaaa00cb630011ff11d4ffaa773a00ff4444dc6b0066000001ff0188
4200ecffdc6bdc00ffdcba00333300ed00ed7300ffff88994a0011ffff770000
ff8301ffbabafe7b00fffeff00cb00ff999922ffff880000ffff77008888ffdc
ff1a33000000aa33ffff009900990000000001326600ffbaff44ffffffaaff00
770000fefeaa00004a9900ffff66ff22220000998bff1155ffffff0101ff88ff
005500001111fffffefffdfea4ff4466ffffff66ff003300ffff55ff77770000
88ff44ff00110077ffff006666ffffed000100fff5ed1111ffffff44ff22ffff
eded11110088ffff00007793ff2200dcdc3333fffe00febabaff99ffff333300
63cb00baba00acff55ffffdcffff337bfe00ed00ed5555ffaaffffdcdcff5555
00000066dcdc00dc00dc83ff017777fffefeffffffcbff5555777700fefe00cb
00cb0000fe010200010000122200ffff220044449bff33ffd4aa0000559999ff
999900ba00ba2a5500ffcbcbb4ff66ff9b33ffffbaaa00aa42880053aa00ffaa
aa0000ed00babaffff1100fe00000044009999990099ffcc99ba000088008800
dc00ff93220000dcfefffeaa5300770077020100cb0000000033ffedff00ba00
ff3333edffedffc488bcff7700aa00660066002222dc0000ffcbffdcffdcff8b
110000cb00010155005500880000002201ffffcbffcbed0000ff88884400445b
ba00ffbc77ff99ff006600baffba00777773ed00fe00003300330000baff77ff
004400aaffaafffefe000011220022c4ff8800eded99ff99ff55ff002200ffb4
661100110a1100ff1111dcffbabaffff88ff88010001ff33ffb98ed362000002
a249444154789c65d0695c0b001806f03711a9904a94d24dac63292949e5a810
d244588a14ca5161d1a1323973252242d62157d12ae498c8124d25ca3a11398a
16e55a3cdffab0ffe7f77d7fcff3528645349b584c3187824d9d19d4ec2e3523
9eb0ae975cf8de02f2486d502191841b42967a1ad49e5ddc4265f69a899e26b5
e9e468181baae3a71a41b95669da8df2ea3594c1b31046d7b17bfb86592e4cbe
d89b23e8db0af6304d756e60a8f4ad378bdc2552ae5948df1d35b52143141533
33bbbbababebeb3b3bc9c9c9c6c6c0c0d7b7b535323225a5aa8a02024a4bedec
0a0a2a2bcdcd7d7cf2f3a9a9c9cdcdd8b8adcdd5b5ababa828298982824a4ab2
b21212acadbdbc1414e2e24859b9a72730302f4f49292c4c57373c9c0a0b7372
8c8c1c1c3a3a92936d6dfdfd293e3e26262a4a4eaea2424b4b5fbfbc9c323278
3c0b0ba1303abaae8ecdeeed950d6669a9a7a7a141d4de9e9d5d5cdcd2229b94
c572716132f97cb1d8db9bc3110864a39795d9db6b6a26267a7a9a98d4d6a6a7
cb76090ef6f030354d4d75766e686030545464cb393a1a1ac6c68686eae8f8f9
a9aa4644c8b66d6e1689dcdd2512a994cb35330b0991ad9f9b6b659596a6addd
d8282fafae5e5323fb8f41d01f76c22fd8061be01bfc041a0323e1002c81cd30
0b9ec027a0c930014ec035580fc3e112bc069a0b53e11c0c8095f00176c163a0
e5301baec06a580677600ddc05ba0f13e120bc81a770133ec355a017300d4ec2
0c7800bbe1219c02fa08f3e13c1c85dbb00a2ec05ea0dff00a6ec15a98027360
070c047a06d7e1085c84f1b014f6c03fa0b33018b6c0211801ebe018fc00da0a
6f61113c877eb01d4ec317a085700f26c130f80efbe132bc039a0733e106fc81
f7f017f6c10aa0d1300a0ec374780943e1382c06fa0a9b60238c83473016cec0
02f80f73fefe1072afc1e50000000049454e44ae426082
"""),
'basi6a08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200806000001047d4a
620000000467414d41000186a031e8965f0000012049444154789cc595414ec3
3010459fa541b8bbb26641b8069b861e8b4d12c1c112c1452a710a2a65d840d5
949041fc481ec98ae27c7f3f8d27e3e4648047600fec0d1f390fbbe2633a31e2
9389e4e4ea7bfdbf3d9a6b800ab89f1bd6b553cfcbb0679e960563d72e0a9293
b7337b9f988cc67f5f0e186d20e808042f1c97054e1309da40d02d7e27f92e03
6cbfc64df0fc3117a6210a1b6ad1a00df21c1abcf2a01944c7101b0cb568a001
909c9cf9e399cf3d8d9d4660a875405d9a60d000b05e2de55e25780b7a5268e0
622118e2399aab063a815808462f1ab86890fc2e03e48bb109ded7d26ce4bf59
0db91bac0050747fec5015ce80da0e5700281be533f0ce6d5900b59bcb00ea6d
200314cf801faab200ea752803a8d7a90c503a039f824a53f4694e7342000000
0049454e44ae426082
"""),
'basn0g01': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002001000000005b0147
590000000467414d41000186a031e8965f0000005b49444154789c2dccb10903
300c05d1ebd204b24a200b7a346f90153c82c18d0a61450751f1e08a2faaead2
a4846ccea9255306e753345712e211b221bf4b263d1b427325255e8bdab29e6f
6aca30692e9d29616ee96f3065f0bf1f1087492fd02f14c90000000049454e44
ae426082
"""),
'basn0g02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002002000000001ca13d
890000000467414d41000186a031e8965f0000001f49444154789c6360085df5
1f8cf1308850c20053868f0133091f6390b90700bd497f818b0989a900000000
49454e44ae426082
"""),
# A version of basn0g04 dithered down to 3 bits.
'Basn0g03': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
2900000001734249540371d88211000000fd49444154789c6d90d18906210c84
c356f22356b2889588604301b112112b11d94a96bb495cf7fe87f32d996f2689
44741cc658e39c0b118f883e1f63cc89dafbc04c0f619d7d898396c54b875517
83f3a2e7ac09a2074430e7f497f00f1138a5444f82839c5206b1f51053cca968
63258821e7f2b5438aac16fbecc052b646e709de45cf18996b29648508728612
952ca606a73566d44612b876845e9a347084ea4868d2907ff06be4436c4b41a3
a3e1774285614c5affb40dbd931a526619d9fa18e4c2be420858de1df0e69893
a0e3e5523461be448561001042b7d4a15309ce2c57aef2ba89d1c13794a109d7
b5880aa27744fc5c4aecb5e7bcef5fe528ec6293a930690000000049454e44ae
426082
"""),
'basn0g04': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
290000000467414d41000186a031e8965f0000004849444154789c6360601014
545232367671090d4d4b2b2f6720430095dbd1418e002a77e64c720450b9ab56
912380caddbd9b1c0154ee9933e408a072efde25470095fbee1d1902001f14ee
01eaff41fa0000000049454e44ae426082
"""),
'basn0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
280000000467414d41000186a031e8965f0000004149444154789c6364602400
1408c8b30c05058c0f0829f8f71f3f6079301c1430ca11906764a2795c0c0605
8c8ff0cafeffcff887e67131181430cae0956564040050e5fe7135e2d8590000
000049454e44ae426082
"""),
'basn0g16': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002010000000000681f9
6b0000000467414d41000186a031e8965f0000005e49444154789cd5d2310ac0
300c4351395bef7fc6dca093c0287b32d52a04a3d98f3f3880a7b857131363a0
3a82601d089900dd82f640ca04e816dc06422640b7a03d903201ba05b7819009
d02d680fa44c603f6f07ec4ff41938cf7f0016d84bd85fae2b9fd70000000049
454e44ae426082
"""),
'basn2c08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
a30000000467414d41000186a031e8965f0000004849444154789cedd5c10900
300c024085ec91fdb772133b442bf4a1f8cee12bb40d043b800a14f81ca0ede4
7d4c784081020f4a871fc284071428f0a0743823a94081bb7077a3c00182b1f9
5e0f40cf4b0000000049454e44ae426082
"""),
'basn2c16': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000201002000000ac8831
e00000000467414d41000186a031e8965f000000e549444154789cd596c10a83
301044a7e0417fcb7eb7fdadf6961e06039286266693cc7a188645e43dd6a08f
1042003e2fe09aef6472737e183d27335fcee2f35a77b702ebce742870a23397
f3edf2705dd10160f3b2815fe8ecf2027974a6b0c03f74a6e4192843e75c6c03
35e8ec3202f5e84c0181bbe8cca967a00d9df3491bb040671f2e6087ce1c2860
8d1e05f8c7ee0f1d00b667e70df44467ef26d01fbd9bc028f42860f71d188bce
fb8d3630039dbd59601e7ab3c06cf428507f0634d039afdc80123a7bb1801e7a
b1802a7a14c89f016d74ce331bf080ce9e08f8414f04bca133bfe642fe5e07bb
c4ec0000000049454e44ae426082
"""),
'basn3p04': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200403000000815467
c70000000467414d41000186a031e8965f000000037342495404040477f8b5a3
0000002d504c54452200ff00ffff8800ff22ff000099ffff6600dd00ff77ff00
ff000000ff99ddff00ff00bbffbb000044ff00ff44d2b049bd00000047494441
54789c63e8e8080d3d7366d5aaf27263e377ef66ce64204300952b28488e002a
d7c5851c0154eeddbbe408a07119c81140e52a29912380ca4d4b23470095bb7b
37190200e0c4ead10f82057d0000000049454e44ae426082
"""),
'basn4a16': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020100400000089e36e
3c0000000467414d41000186a031e8965f0000085549444154789cc5975f685b
e719c67f968fa4a363ebf84875524754ae9d283885121aba42ba2d17b1bd8e50
d22e253412bbc8e4d042694b618977119d8b5d48be98938bd0f4a2c9901658b0
1a028366258524a68cd27a84d2e2956da169ea4aade219574791ed63fd399677
f17e19a174d73518994fc7d2fb3eeff33ecff30160656158873da760d48217ce
c2b10138fe47c80ec1d93fc3c55df0de65f8e809f8e75fe1ee5e58bf2ebf77f7
cad9474fc8331777c9ff6487e4338e0dc8678e5af21dc3ba7c27806665a1665b
b9ae19f015a1bb025a1102bb217008f42684de86e6756817c1d36063043acf02
6fc887749272e669d05e90679b29589f04f710ac5d825503ea15a8a7a056805a
0aac6c2dd335ac43ad60e59c54241b75e121171e5aff3faf3f7006f09d01df85
bef7fa4367eab56a4064c6b1ad742da35959e9bccb85aa61657d27a13b03fed3
10c8807e124219e8c9403303ed0c7827a19381cd8c4220075e0eda53d0cc4123
076e0ed672b03205f51cd472e0e4a03a0551b76647526066418b6405769f0bbe
93b03c15c9fae6401b03ff97a05f84d022f48c41c383d687e09d868dc3b0f988
14b07158ce5a1fca33ee53b0f63aacdc807bc7c0d902d583b03c0bfd271d3be2
42df0c9831c501ad08da2fa473df1c2ccd5a59dfa3a0ed83600cf4bd601c8170
1a1a67a13d011bfdb0f91355c03cb4af40230febbf83d502d4d7a0f62fa8b660
f9362ccdc2d6d19a1dcd805505f35de8bd8f406037f87f26b06b63e07b14160b
91acef0cf83f80e00a1825089f80f53a34df026f0536af4a01de889cadfb61f5
04d44be0bc00cb4761c984c5020ca41dbb3f01910c98af40b8083df30a81c021
089465e6fe2fa573df19a89856568b4370108c41080f8235088d4168ef81cea0
14d02e41a3046b25a8ff1d9c122c97e03f25a8942156afd95b3f836800fa7640
f85be89901e32f0a01bd09fa1e219c7e5160f77f005a1c4ae54856d340d7a1b7
172c0b5c175a2de874a480564bceea75a8566169092a1528956130eed80fd7a1
7f02ac0a847f0d3d69308a109a560884de86d02e617b6851661e5c91ce350dee
7c6565fdfbc1380ad6046c39068d51e8fc460a68e4616516aa0558cc43390f77
6ec0f6e19a1d8b41ff0a44d260cec936195f42a808c1fb1c685e07e35379b367
4c08679404765d07ff7eb8958f64838f415f0db66c037714bc5352803b0ad549
b85b83858fe1561e46261c3bfe356cdd0a913a9813d0db034606f42404672038
ae106817a115973d6f78c2f6f00999796faf741e7c0ce627adac5186fe323c6a
43fb7329a06643250e5f7c02f371d83d5db3879e86810b108d82b902bd6908fd
01f46720f80f0814c17f1f014f83f66b2232ad0f65d5d6eb4238cb12d8fb6a60
94612e1ec94612309c8046420a58bc06ffbe0d73b7616fd9b1773e09db2c88a6
c134a1a70ea134e86310f839f89f077f11344f21b031021bd744e1bcd3b2e7cd
b784edae2b33dfb24d3a8f24e0ea6d2b1bdf0f3f3d2a057c7e0eaebe0f071235
7b571962a7207a17c2436018a07f07c157c17f10b4e3a0dd84ee19e8bea510e8
3c0b1d43e475e3b0888cb722abd66a09e1dc51817d3801f1fd70ee7c243b3e2e
059c3b0f2fbfe4d88f9761a00cd63418b3a02f402000fe05d0d2d0bd5b89dd2f
45fe7def290478033693a2ed9b8f88c26d5e953def7484edde29997923219d8f
8fc38b47c4542fbd53b3b76f87be0ba07f03fe53a04d80ef4fe0f381af0e5d13
d0d5075d23d0f537e82a0267c0c78ffca3d56cf1f38e21aeb67158b4dd1b1185
6bb564cfdd5161fbe23599f9b9f3d239c08b47e0e597e0f1320cec03eb841ac1
1d350213b4bc1ac165358224f86cd01cfb0112e61409af28129684842bb3b2e7
95b8b0fdeafb32f3eddba58b975f92820e2460571c629310cd3f40c230040b8a
843945c2e7a07b8f42e07f6b38a5d6302fc6b25652f25a1091f9e21359b50389
9afd7859660ed2f981045cbd0d4e1c76feea7b6bb80d4279d05f834053ad614a
ada1634b8c6a855498f094a59e1063a956455e173e1691d95b76ec5d8aedfa37
52c0c03ee9dc89c35c1cdc69b8f7a0108d40ef2908dd005d53429404ff9042a0
791d9a9faa24f394f2f392b8dad29268fbadbc28dcce2765cfad69613bc8cc63
93d2b93b0df393d09c00f76b585d854818cc02f4be03c64d25c54925c58ead02
e4ef558c7a5dc284f382586aa522c63232e1d8434f2b68ef0ac9b40929c09895
996fb3a4f3e68414dc1e8646035c13dcbc32a3379519a520682b04d627c10da9
0c774392ccf251f1f352595c2dfeb582342de4d21764cf41d81e1e92f7062e48
e7ed61b8f315781e34c3d02c40a302e19cb2e32484ee6f817b08d6ca1220ef1d
9318b5644a98188c3b762c26ae168d0aa90c43d6cba75424109033d394675657
a573cf93063c13da796806a0b1031adf422309465021b0760956de94f4ea6c91
0cb7589024f3705df9795d5cada72edaee5f108503d9733d2c6c374764e6ae29
9d7b26544ce8a4c14b28e77d055a2968cd2b04560da81b129dab0725400ea41d
7beb6792642269e5e76971b5e0aba2ed5d8a035a5ef63c9417b69b059979b320
9d77d2d2506714bc2bd0ae423b09ade71402f50adc7325b72fabf4da9f900c67
55843cbd3dcacfc74450ba778bb683fced3f287b1eba216c37e764e6cd8074de
1995c63a39d8f82d7849f0620a817a0a9c19b934f49f74ec6846d26bdf0e95e1
322ac93c237eae1d1757eb1a51055c16850b3465cf8d9bc2f6704e66de2e4ae7
9d1c2c4f41c7864e0a366cf8f1af668e2d17c5c88c634752eac6f2aecaed332a
bd1625c3058a9264bad545b6ab2805f892a2edfe94285c30297b6e2485edad94
ccdc4b4ae79b33e0a46033ab3860656b192b2d7735332637969e79c9eda16949
afc17195e13c4932bef78033aa005b198b27f21a1c179109d9b26aad79219c17
13d83b69f9f29a0dff052002c70fc3e1ac750000000049454e44ae426082
"""),
'basn6a08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200806000000737a7a
f40000000467414d41000186a031e8965f0000006f49444154789cedd6310a80
300c46e12764684fa1f73f55048f21c4ddc545781d52e85028fc1f4d28d98a01
305e7b7e9cffba33831d75054703ca06a8f90d58a0074e351e227d805c8254e3
1bb0420f5cdc2e0079208892ffe2a00136a07b4007943c1004d900195036407f
011bf00052201a9c160fb84c0000000049454e44ae426082
"""),
'cs3n3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f0000000373424954030303a392a042
00000054504c544592ff0000ff9200ffff00ff0000dbff00ff6dffb600006dff
b6ff00ff9200dbff000049ffff2400ff000024ff0049ff0000ffdb00ff4900ff
b6ffff0000ff2400b6ffffdb000092ffff6d000024ffff49006dff00df702b17
0000004b49444154789c85cac70182000000b1b3625754b0edbfa72324ef7486
184ed0177a437b680bcdd0031c0ed00ea21f74852ed00a1c9ed0086da0057487
6ed0121cd6d004bda0013a421ff803224033e177f4ae260000000049454e44ae
426082
"""),
'f02n0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
280000012a49444154789c85d12f4b83511805f0c3f938168b2088200882410c
03834dd807182c588749300c5604c30b0b03c360e14d826012c162b1182c8241
100441f47dee5fc3a6f7b9efc2bdf9c7e59cf370703a3caf26d3faeae6f6fee1
f1e9f9e5f5edfde3f3ebbb31d6f910227f1a6944448c31d65aebac77de7b1f42
883146444a41b029084a41500a825210340541d1e2607f777b733d13344a7401
00c8046d127da09a4ceb5cd024010c45446a40e5a04d029827055452da247ac7
f32e80ea42a7c4a20ba0dad22e892ea0f6a06b8b3e50a9c5e85ae264d1e54fd0
e762040cb2d5e93331067af95de8b4980147adcb3128710d74dab7a54fe20ec0
ec727c313a53822109fc3ff50743122bab6b1b5b3b7b9d439d834189e5d54518
0b82b120180b82b1208882200ae217e9e497bfbfccebfd0000000049454e44ae
426082
"""),
's09n3p02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000090000000902030000009dffee
830000000467414d41000186a031e8965f000000037342495404040477f8b5a3
0000000c504c544500ff000077ffff00ffff7700ff5600640000001f49444154
789c63600002fbff0c0c56ab19182ca381581a4283f82071200000696505c36a
437f230000000049454e44ae426082
"""),
'tbgn3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f00000207504c54457f7f7fafafafab
abab110000222200737300999999510d00444400959500959595e6e600919191
8d8d8d620d00898989666600b7b700911600000000730d007373736f6f6faaaa
006b6b6b676767c41a00cccc0000f30000ef00d51e0055555567670000dd0051
515100d1004d4d4de61e0038380000b700160d0d00ab00560d00090900009500
009100008d003333332f2f2f2f2b2f2b2b000077007c7c001a05002b27000073
002b2b2b006f00bb1600272727780d002323230055004d4d00cc1e00004d00cc
1a000d00003c09006f6f00002f003811271111110d0d0d55554d090909001100
4d0900050505000d00e2e200000900000500626200a6a6a6a2a2a29e9e9e8484
00fb00fbd5d500801100800d00ea00ea555500a6a600e600e6f7f700e200e233
0500888888d900d9848484c01a007777003c3c05c8c8008080804409007c7c7c
bb00bbaa00aaa600a61e09056262629e009e9a009af322005e5e5e05050000ee
005a5a5adddd00a616008d008d00e20016050027270088110078780000c40078
00787300736f006f44444400aa00c81e004040406600663c3c3c090000550055
1a1a00343434d91e000084004d004d007c004500453c3c00ea1e00222222113c
113300331e1e1efb22001a1a1a004400afaf00270027003c001616161e001e0d
160d2f2f00808000001e00d1d1001100110d000db7b7b7090009050005b3b3b3
6d34c4230000000174524e530040e6d86600000001624b474402660b7c640000
01f249444154789c6360c0048c8c58049100575f215ee92e6161ef109cd2a15e
4b9645ce5d2c8f433aa4c24f3cbd4c98833b2314ab74a186f094b9c2c27571d2
6a2a58e4253c5cda8559057a392363854db4d9d0641973660b0b0bb76bb16656
06970997256877a07a95c75a1804b2fbcd128c80b482a0b0300f8a824276a9a8
ec6e61612b3e57ee06fbf0009619d5fac846ac5c60ed20e754921625a2daadc6
1967e29e97d2239c8aec7e61fdeca9cecebef54eb36c848517164514af16169e
866444b2b0b7b55534c815cc2ec22d89cd1353800a8473100a4485852d924a6a
412adc74e7ad1016ceed043267238c901716f633a812022998a4072267c4af02
92127005c0f811b62830054935ce017b38bf0948cc5c09955f030a24617d9d46
63371fd940b0827931cbfdf4956076ac018b592f72d45594a9b1f307f3261b1a
084bc2ad50018b1900719ba6ba4ca325d0427d3f6161449486f981144cf3100e
2a5f2a1ce8683e4ddf1b64275240c8438d98af0c729bbe07982b8a1c94201dc2
b3174c9820bcc06201585ad81b25b64a2146384e3798290c05ad280a18c0a62e
e898260c07fca80a24c076cc864b777131a00190cdfa3069035eccbc038c30e1
3e88b46d16b6acc5380d6ac202511c392f4b789aa7b0b08718765990111606c2
9e854c38e5191878fbe471e749b0112bb18902008dc473b2b2e8e72700000000
49454e44ae426082
"""),
'Tp2n3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f00000300504c544502ffff80ff05ff
7f0703ff7f0180ff04ff00ffff06ff000880ff05ff7f07ffff06ff000804ff00
0180ff02ffff03ff7f02ffff80ff0503ff7f0180ffff0008ff7f0704ff00ffff
06ff000802ffffff7f0704ff0003ff7fffff0680ff050180ff04ff000180ffff
0008ffff0603ff7f80ff05ff7f0702ffffff000880ff05ffff0603ff7f02ffff
ff7f070180ff04ff00ffff06ff000880ff050180ffff7f0702ffff04ff0003ff
7fff7f0704ff0003ff7f0180ffffff06ff000880ff0502ffffffff0603ff7fff
7f0702ffff04ff000180ff80ff05ff0008ff7f07ffff0680ff0504ff00ff0008
0180ff03ff7f02ffff02ffffffff0604ff0003ff7f0180ffff000880ff05ff7f
0780ff05ff00080180ff02ffffff7f0703ff7fffff0604ff00ff7f07ff0008ff
ff0680ff0504ff0002ffff0180ff03ff7fff0008ffff0680ff0504ff000180ff
02ffff03ff7fff7f070180ff02ffff04ff00ffff06ff0008ff7f0780ff0503ff
7fffff06ff0008ff7f0780ff0502ffff03ff7f0180ff04ff0002ffffff7f07ff
ff0604ff0003ff7fff00080180ff80ff05ffff0603ff7f0180ffff000804ff00
80ff0502ffffff7f0780ff05ffff0604ff000180ffff000802ffffff7f0703ff
7fff0008ff7f070180ff03ff7f02ffff80ff05ffff0604ff00ff0008ffff0602
ffff0180ff04ff0003ff7f80ff05ff7f070180ff04ff00ff7f0780ff0502ffff
ff000803ff7fffff0602ffffff7f07ffff0680ff05ff000804ff0003ff7f0180
ff02ffff0180ffff7f0703ff7fff000804ff0080ff05ffff0602ffff04ff00ff
ff0603ff7fff7f070180ff80ff05ff000803ff7f0180ffff7f0702ffffff0008
04ff00ffff0680ff0503ff7f0180ff04ff0080ff05ffff06ff000802ffffff7f
0780ff05ff0008ff7f070180ff03ff7f04ff0002ffffffff0604ff00ff7f07ff
000880ff05ffff060180ff02ffff03ff7f80ff05ffff0602ffff0180ff03ff7f
04ff00ff7f07ff00080180ffff000880ff0502ffff04ff00ff7f0703ff7fffff
06ff0008ffff0604ff00ff7f0780ff0502ffff03ff7f0180ffdeb83387000000
f874524e53000000000000000008080808080808081010101010101010181818
1818181818202020202020202029292929292929293131313131313131393939
393939393941414141414141414a4a4a4a4a4a4a4a52525252525252525a5a5a
5a5a5a5a5a62626262626262626a6a6a6a6a6a6a6a73737373737373737b7b7b
7b7b7b7b7b83838383838383838b8b8b8b8b8b8b8b94949494949494949c9c9c
9c9c9c9c9ca4a4a4a4a4a4a4a4acacacacacacacacb4b4b4b4b4b4b4b4bdbdbd
bdbdbdbdbdc5c5c5c5c5c5c5c5cdcdcdcdcdcdcdcdd5d5d5d5d5d5d5d5dedede
dededededee6e6e6e6e6e6e6e6eeeeeeeeeeeeeeeef6f6f6f6f6f6f6f6b98ac5
ca0000012c49444154789c6360e7169150d230b475f7098d4ccc28a96ced9e32
63c1da2d7b8e9fb97af3d1fb8f3f18e8a0808953544a4dd7c4c2c9233c2621bf
b4aab17fdacce5ab36ee3a72eafaad87efbefea68702362e7159652d031b07cf
c0b8a4cce28aa68e89f316aedfb4ffd0b92bf79fbcfcfe931e0a183904e55435
8decdcbcc22292b3caaadb7b27cc5db67af3be63e72fdf78fce2d31f7a2860e5
119356d037b374f10e8a4fc92eaa6fee99347fc9caad7b0f9ebd74f7c1db2fbf
e8a180995f484645dbdccad12f38363dafbcb6a573faeca5ebb6ed3e7ce2c29d
e76fbefda38702063e0149751d537b67ff80e8d4dcc29a86bea97316add9b0e3
c0e96bf79ebdfafc971e0a587885e515f58cad5d7d43a2d2720aeadaba26cf5a
bc62fbcea3272fde7efafac37f3a28000087c0fe101bc2f85f0000000049454e
44ae426082
"""),
'tbbn1g04': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
290000000467414d41000186a031e8965f0000000274524e530007e8f7589b00
000002624b47440000aa8d23320000013e49444154789c55d1cd4b024118c7f1
efbe6419045b6a48a72d352808b435284f9187ae9b098627a1573a19945beba5
e8129e8222af11d81e3a4545742de8ef6af6d5762e0fbf0fc33c33f36085cb76
bc4204778771b867260683ee57e13f0c922df5c719c2b3b6c6c25b2382cea4b9
9f7d4f244370746ac71f4ca88e0f173a6496749af47de8e44ba8f3bf9bdfa98a
0faf857a7dd95c7dc8d7c67c782c99727997f41eb2e3c1e554152465bb00fe8e
b692d190b718d159f4c0a45c4435915a243c58a7a4312a7a57913f05747594c6
46169866c57101e4d4ce4d511423119c419183a3530cc63db88559ae28e7342a
1e9c8122b71139b8872d6e913153224bc1f35b60e4445bd4004e20ed6682c759
1d9873b3da0fbf50137dc5c9bde84fdb2ec8bde1189e0448b63584735993c209
7a601bd2710caceba6158797285b7f2084a2f82c57c01a0000000049454e44ae
426082
"""),
'tbrn2c08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
a30000000467414d41000186a031e8965f0000000674524e53007f007f007f8a
33334f00000006624b474400ff0000000033277cf3000004d649444154789cad
965f68537714c73fd912d640235e692f34d0406fa0c1663481045ab060065514
56660a295831607df0a1488715167060840a1614e6431e9cb34fd2c00a762c85
f6a10f816650c13b0cf40612e1822ddc4863bd628a8924d23d6464f9d3665dd9
f7e977ce3dbff3cd3939bfdfef6bb87dfb364782dbed065ebe7cd93acc78b4ec
a228debd7bb7bfbfbfbbbbfb7f261045311a8d261209405194274f9ea4d3e916
f15f1c3eb5dd6e4fa5fecce526239184a2b0b8486f6f617171b1f5ae4311381c
8e57af5e5dbd7a351088150a78bd389d44222c2f93cdfe66b7db8f4ee07038b6
b6b6bebf766d7e7e7e60a06432313b4ba984c3c1c4049a46b95c5a58583822c1
dbb76f27272733d1b9df853c3030c0f232562b9108cf9eb1b888d7cbf030abab
31abd5fa1f08dc6ef7e7cf9f1f3f7e1c8944745d4f1400c62c001313acad21cb
b8dd2c2c603271eb1640341aad4c6d331aa7e8c48913a150a861307ecc11e964
74899919bc5e14e56fffc404f1388502f178dceff7ef4bf0a5cfe7abb533998c
e5f9ea2f1dd88c180d64cb94412df3dd57e83a6b3b3c7a84c98420100c72fd3a
636348bae726379fe69e8e8d8dbd79f3a6558b0607079796965256479b918085
7b02db12712b6181950233023f3f647494ee6e2e5ea45864cce5b8a7fe3acffc
3aebb22c2bd5d20e22d0757d7b7bbbbdbd3d94a313bed1b0aa3cd069838b163a
8d4c59585f677292d0b84d9a995bd337def3fe6bbe5e6001989b9b6bfe27ea08
36373781542ab56573248b4c5bc843ac4048c7ab21aa24ca00534c25482828a3
8c9ee67475bbaaaab22cb722c8e57240a150301a8d219de94e44534d7d90e885
87acb0e2c4f9800731629b6c5ee14a35a6b9887d2a0032994cb9cf15dbe59650
ff7b46a04c9a749e7cc5112214266cc65c31354d5b5d5d3d90209bcd5616a552
a95c2e87f2a659bd9ee01c2cd73964e438f129a6aa9e582c363838b80f81d7eb
5555b56a2a8ad2d9d7affd0409f8015c208013fea00177b873831b0282c964f2
783c1e8fa7582cee5f81a669b5e6eeeeaee58e8559b0c233d8843c7c0b963a82
34e94b5cb2396d7d7d7db22c8ba258fb0afd43f0e2c58b919191ba9de9b4d425
118329b0c3323c8709d02041b52b4ea7f39de75d2a934a2693c0a953a76a93d4
5d157ebf7f6565a5542a553df97c5e10045dd731c130b86113cc300cbd489224
08422a952a140a95788fc763b1d41558d7a2d7af5f5fb870a1d6a3aaaacd6603
18802da84c59015bd2e6897b745d9765b99a1df0f97c0daf74e36deaf7fbcd66
73ad2797cb89a2c839880188a2e8743a8bc5a22ccbba5e376466b3b9bdbdbd21
6123413a9d0e0402b51e4dd3bababa788eb022b85caeb6b6364551b6b7b76942
43f7f727007a7a7a04a1ee8065b3595fde2768423299ac1ec6669c3973e65004
c0f8f878ad69341a33994ced2969c0d0d0502412f9f8f163f3a7fd654b474787
288ad53e74757535df6215b85cae60302849d2410aecc037f9f2e5cbd5b5c160
680eb0dbede170381c0e7ff8f0a185be3b906068684892a4ca7a6f6faff69328
8ad3d3d3f7efdfdfdbdbfb57e96868a14d0d0643381c96242997cbe5f3794010
84603078fcf8f1d6496bd14a3aba5c2ea7d369341a5555b5582c8140e0fcf9f3
1b1b1b87cf4eeb0a8063c78e45a3d19e9e1ebfdfdf5a831e844655d18093274f
9e3d7bf6d3a74f3b3b3b47c80efc05ff7af28fefb70d9b0000000049454e44ae
426082
"""),
'basn6a16': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020100600000023eaa6
b70000000467414d41000186a031e8965f00000d2249444154789cdd995f6c1c
d775c67ff38fb34b724d2ee55a8e4b04a0ac87049100cab4dbd8c6528902cb4d
10881620592e52d4325ac0905bc98a94025e71fd622cb5065ac98a0c283050c0
728a00b6e542a1d126885cd3298928891d9a0444037e904434951d4b90b84b2f
c9dde1fcebc33977a95555348f411e16dfce9d3b77ee77eebde77ce78c95a669
0ad07c17009a13edd898b87dfb1fcb7d2b4d1bff217f33df80deb1e6267df0ff
c1e6e6dfafdf1f5a7fd30f9aef66b6d546dd355bf02c40662e3307f9725a96c6
744c3031f83782f171c148dbc3bf1774f5dad1e79d6f095a3f54d4fbec5234ef
d9a2f8d73afe4f14f57ef4f42def7b44f19060f06b45bddf1c5534d77fd922be
2973a15a82e648661c6e3240aa3612ead952b604bde57458894f29deaf133bac
13d2766f5227a4a3b8cf08da7adfd6fbd6bd8a4fe9dbb43d35e3dfa3f844fbf8
9119bf4f7144094fb56333abf8a86063ca106f94b3a3b512343765e60082097f
1bb86ba72439a653519b09f5cee1ce61c897d37eedf5553580ae60f4af8af33a
b14fd400b6a0f34535c0434afc0b3a9f07147527a5fa7ca218ff56c74d74dc3f
155cfd3325fc278acf2ae1cb4a539f5f9937c457263b0bd51234c732a300cdd1
cc1840f0aaff54db0e4874ed5a9b5d6d27d4bb36746d80de72baa877ff4b275a
d7895ed1897ea4139b5143fcbb1a62560da1ed9662aaed895ec78a91c18795b8
5e07ab4af8ba128e95e682e0728bf8f2e5ae815a091a53d902ac1920d8e05f06
589de8d8d66680789f4e454fb9d9ec66cd857af796ee2d902fa73fd5bba775a2
153580ae44705ed0d37647d15697cb8f14bfa3e3e8fdf8031d47af571503357c
f30d25acedcbbf135c9a35c49766ba07ab255859e8ec03684e66860182dff8f7
0304bff6ff1c20fc81b7afdd00a71475539a536e36bb5973a19e3b923b02bde5
e4efd4003ac170eb2d13fe274157afedbd82d6fb3a9a1e85e4551d47cf7078f8
9671fe4289ebf5f2bf08d63f37c4eb4773c55a0996efeefa0ca011671d8060ca
2f0004c7fcc300e166ef0240f825efe3361f106d57d423d0723f7acacd66376b
2ed47b7a7a7a205f4ef4ac4691e0aad9aa0d41cf13741c3580a506487574ddca
61a8c403c1863ebfbcac3475168b2de28b8b3d77544bb05ce92a02aceced3c0d
d0cc65ea371b201cf1c601c24dde1c4078cedbdeb60322f50126a019bf6edc9b
39e566b39b3517eaf97c3e0fbde5e4491d45bd74537145d155b476aa0176e868
c6abebf30dbd5e525c54ac8e18e2d56abeb756827a3d970358a97416019a6f64
f60004fdfe1580d5c98e618070cc1b05887eee7e0d209a70db7d8063029889b4
c620ead78d7b33a7dc6c76b3e6427ddddbebde867c393aa7845e5403e8ca794a
d0d6fb897af5f03525fe5782f5e7046bdaef468bf88d1debc6ab25583cd17310
6079b9ab0ba059c914018245bf076075b5a303200c3c1f209a733701444fbbaf
00c4134ebb016c5d0b23614c243701cdf875e3decce9349bddacb9505fbf7dfd
76e82d87736a00f5d2b5ffd4b7dce2719a4d25ae717ee153c1abef18e257cfad
7fa45682da48ef38c052b53b0fd06864b300c151ff08c0ea431de701a287dd5f
004497dc7b01a253ee3e80b8c7f91c20f967fb6fdb7c80ada7d8683723614c24
3701cdf875e3decc29379bddacb950ef3fd47f08f2e5a61ea4aa2a3eb757cd55
13345efcfa59c12b2f19e2578ef77fb75a82854ffbee01a83f977b11a031931d
040802df07082b5e11207cc17b1e209a770700e2df0a83e409fb7580f827c230
99b06fd901fb058d6835dacd481813c94d40337eddb83773cacd66376b2ed437
bebcf165e82d2f4e4beb7f3fa6e652c2d7ee10bc78c010bfb87fe3c95a09ae9f
bd732740bd2fb700d0f865f64180e059ff044018ca0ca28a5b04883f701e0088
bfec7c0c909cb71f0448c6ec518074b375012079d9dedf66004bcfbc51eb2dd1
aadacd481813c94d40337eddb83773cacd66376b2ed487868686205fbe7c49ef
5605a73f34c4a7a787eeab96e0da81bb4e022c15ba27019a5b339300e16bf286
a8eae601e25866907cdf3e0890acb36f00245fb57f05904e59c300e92561946e
b2e600d209ab7d07f04d458dfb46ad1bd16ab49b913026929b8066fcba716fe6
949bcd6ed65ca8ef7e7cf7e3d05b7e7c8f217ee6cdddbb6a25a856f37980e0c7
fe4e80a82623c48193014846ec7180f4acf518409aca0cd28a5504e03b32c374
de1a00608a0240faaa327a4b19fe946fb6f90054dbb5f2333d022db56eb4966a
3723614c243701cdf8f556bea8a7dc6c76b3e66bd46584ddbbcebc0990cf4b0f
ff4070520c282338a7e26700ec725202b01e4bcf0258963c6f1d4d8f0030cb20
805549c520930c03584fa522b676f11600ffc03fde3e1b3489a9c9054c9aa23b
c08856a3dd8c843191dc0434e3d78d7b33a75c36fb993761f7ae5a69f72ef97f
e6ad336fed7e1c60e8bee96980bbdebbb60da07b7069062033d9dc0ae03d296f
70ab511ec071640676252902d833c916007b3e1900b0a6d2028035968e025861
ea01581369fb11488c34d18cbc95989afccca42baad65ba2d5683723614c24d7
8066fcbab8b7e96918baaf5aaa56219f975fb50a43f7c9bde90fa73f1c1a02d8
78f2e27e803b77ca08b90519315b6fe400fc1392097a9eccc0ad444500e70199
a1331f0f00d8934901c07e5d526ceb87c2d07e2579badd005a2b31a5089391b7
1253358049535a6add8856dd0146c298482e01ede27ed878b256ba7600ee3a09
c18fc1df09fe01084ec25defc1b56db0f1a4f4bd78e0e2818d2f0334e7330300
7df7c888b917e50dd9c1c60c80efcb0cbc63e1f700bce7c31700dccbd1060027
8add9b0de06c8e2f00d84962b7d7030e2a61538331b98051f92631bd253f336a
dd8856a3dd44c25c390efddfad96ae9f853b77c25201ba27c533b8bdf28b6ad0
3d084b33d2e7fa59099e9901b8f2d29597fa0f01848f78e70082117f1ca07b76
6910209b9519f895a008d031bbba05c09d8f06005c5b18b8fba25300cea6780e
c03e911c6ccf06d507b48a4fa606634a114609de929f9934c5a87511ad57cfc1
fa476aa5854fa1ef1e3910b905686e85cc24c40138198915f133d2d6dc2a7dea
7df2ccc2a752faf2cec1d577aebeb37e3b4034eeee0008dff3be0e6b923773b4
7904c0ef9119767cb4fa1500ef1361e08e452500f71561e84cc4ed3e20fab6a2
c905f40cb76a3026bf3319b91ac2e46792a6dcd801ebc6aba5da08f48ecb81c8
bd088d5f42f6417191de93908c803d0e76199292b485af41b60e8d9c3c537f0e
8211f0c7211a077707dc18b931b2ee6d80a4d7ae024491ebc24d4a708ff70680
7f25e807e8785f1878e322d6ddaf453f0770ff2dfa769b01423dbbad72a391b6
5a7c3235985629423372494cab55c8f7d64a8b27a0e7202c55a13b0f8d19c80e
4ae9ca3f015115dc3ca467c17a4c7ee95970ab10e5a54ff0ac3cd39881ee5958
1a84f03df0be0e492fd855a8d6aa35d10b4962dbb0a604a3d3ee5e80a8eee600
a24977f8660378bf0bbf00e01d0a8fb7f980f04b8aa6ce6aca8d5a7533c52753
839152c4e222f4dc512dd5eb90cbc981e8ea12cf90cd8a8bf47d89159e2741d3
7124f65b96fcd254dae258fa84a13c13043246a32129574787e49eae2b49b86d
c3e2e78b9ff7f4002415bb08907c66df0d103b4e0c104db90500ff70700c203a
ee1e82dba4c3e16e256c0acca6ceaae9afd1f612d7eb472157ac95962bd05594
7dd1598466053245088e827f44628657942a825b84e4fb601f84b4025611aca3
901e01bb024911dc0a4445f08e41f83df02b10142173149ab71baf027611ea95
7a257704201d14cd9af4d90b00f194530088cb4e09c0df1c5c0088f7393f6833
c0aa3ac156655de3bca9b34ab9716906ba07aba5e5bba1eb3358d90b9da7c533
64f6888bf47b60f521e8380fe10be03d2feac17900927560df40f4e48f805960
50328d648bf4893f9067c217a0631656b7c898c122847bc07b03a2d3e0ee85e4
33b0ef867450c4fad2ecd26cf7168074c0ba0c904cdac300c9cfec4701924df6
1cdca61e10685c6f7d52d0caba1498972f43d740adb4b2009d7d7220b20e3473
90a943d00ffe959bb6eac3e0fe42ea49ee00c45f06e76329b1dabf127d690d80
5581b408f63c2403e0cc433c00ee658836803b0fd100747c04ab5f917704fd10
d5c1cd41ec801343d207f602a403605d86e5f9e5f9ae0d00e994556833806685
c931fb709b0f08b4e869bea5c827859549e82c544b8d29c816a0390999613920
7e610d5727a16318c2003c1fa24be0de2b32caf92224e7c17e5004b6350c4c01
05601218066b0ad28224e149019c086257ca315102de2712903bde97b8144d82
3b2c6ac52d403c054e019249b087f53d0558995a99ea946c70cc927458b3c1ff
550f30050df988d4284376b4566a8e416654cc921985e037e0df0fc131f00f4b
acf0c6211c036f14a239703741740adc7da227edd7e56b833d0ae92549b4d357
25dfb49ed2ff63908e6adf27d6d0dda7638d4154d2778daca17f58e61297c129
41f233b01f5dc3740cac51688c35c6b22580f48224fee9b83502569a66b629f1
09f3713473413e2666e7fe6f6c6efefdfafda1f56f6e06f93496d9d67cb7366a
9964b6f92e64b689196ec6c604646fd3fe4771ff1bf03f65d8ecc3addbb5f300
00000049454e44ae426082
"""),
}
# Make each of the dict entries also be a module entry, so each suite
# image is reachable both as a module attribute and as a `png` dict key.
sys.modules[__name__].__dict__.update(png)
def binary_stdout():
    """Return a byte-oriented handle on standard output.

    On Windows the C runtime stream must additionally be switched to
    binary mode so that newline translation does not corrupt the bytes.
    """
    raw = sys.stdout.buffer
    if sys.platform == "win32":
        # Flip the underlying CRT file descriptor into binary mode.
        import os
        import msvcrt

        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    return raw
def main(argv=None):
    """Command-line entry point.

    With ``--list``, print the names of all suite images; otherwise write
    the named image's PNG bytes to (binary) stdout.

    :param argv: argument list for programmatic use; ``None`` means use
        ``sys.argv`` (argparse's default).
    :return: 0 on success.
    :raises ValueError: if the requested image is not in the suite.
    """
    parser = argparse.ArgumentParser(
        description="Output a PNG file from the PNG suite")
    either = parser.add_mutually_exclusive_group(required=True)
    either.add_argument('--list', action='store_true')
    either.add_argument('image', nargs='?')
    # BUG FIX: previously parse_args() was called with no arguments, so the
    # `argv` parameter was silently ignored and sys.argv was always used.
    args = parser.parse_args(argv)
    if args.list:
        for name in sorted(png):
            print(name)
        return 0
    if args.image not in png:
        raise ValueError("cannot find PNG suite image " + args.image)
    binary_stdout().write(png[args.image])
    return 0
# Allow use as a script; the process exit status comes from main().
if __name__ == '__main__':
    sys.exit(main())
| mit | f60d01e049a6a844a5e903ce3d6706ee | 53.722639 | 69 | 0.952685 | 1.86739 | false | false | false | false |
drj11/pypng | code/minpng.py | 1 | 1182 | # minpng.py
import struct
import zlib
"""
minpng.py
MinPNG (minimal PNG) is the antidote to PyPNG.
Where PyPNG is several thousand lines and
can write and read all PNG formats,
MinPNG is two-dozen lines that writes
an 8-bit greyscale PNG and does nothing else.
"""
def rows_to_png(out, rows, size):
    """Write an 8-bit single-channel (greyscale) PNG to the binary file `out`.

    `rows` yields each pixel row in turn;
    `size` is the (width, height) pair in pixels.
    """
    width, height = size
    # Fixed 8-byte PNG signature: 137 80 78 71 13 10 26 10.
    out.write(b"\x89PNG\r\n\x1a\n")
    # IHDR: width, height, bit depth 8, colour type 0 (greyscale),
    # compression 0, filter 0, interlace 0.
    header = struct.pack(">2LBBBBB", width, height, 8, 0, 0, 0, 0)
    write_chunk(out, b"IHDR", header)
    # Each scanline is prefixed with filter-type byte 0 (None).
    raw = bytearray()
    for row in rows:
        raw.append(0)
        raw.extend(row)
    write_chunk(out, b"IDAT", zlib.compress(raw))
    write_chunk(out, b"IEND", bytearray())
def write_chunk(out, chunk_type, data):
    """Write one PNG chunk to `out`.

    Layout per the PNG spec: 4-byte big-endian payload length, 4-byte
    chunk type, payload, then CRC-32 computed over type + payload.
    """
    assert len(chunk_type) == 4
    # Running CRC: seed with the type bytes, then fold in the payload.
    crc = zlib.crc32(data, zlib.crc32(chunk_type))
    for piece in (
        struct.pack(">L", len(data)),
        chunk_type,
        data,
        struct.pack(">L", crc),
    ):
        out.write(piece)
| mit | 614a46638acb7e4fbe116c307ae17cb3 | 25.266667 | 69 | 0.647208 | 3.118734 | false | false | false | false |
drj11/pypng | code/prix.py | 1 | 1753 | # prix.py
"""
prix (Python Raster Image eXtensions)
A collection of routines useful for raster image processing,
mostly not associate with the PNG format, but
using datatypes from PyPNG.
"""
# https://pypi.org/project/pypng/
import png
def window(image, tl, br):
    """Return a new png.Image that is an axis-aligned window of *image*.

    The window is the rectangle with opposite corners *tl* and *br*
    (each an (x, y) pair); as in ImageMagick, (0, 0) is the top-left.
    Any coordinate may be None, meaning the corresponding image edge
    (0 for left/top, the image width/height for right/bottom).
    """
    width, height = image.info["size"]
    left, top = tl
    right, bottom = br
    # None selects the matching image boundary.
    if top is None:
        top = 0
    if left is None:
        left = 0
    if bottom is None:
        bottom = height
    if right is None:
        right = width
    # Only proper, in-bounds, non-empty windows are supported.
    if not (0 <= left < right <= width) or not (0 <= top < bottom <= height):
        raise NotImplementedError()
    # Each row is a flat sequence of values, so the horizontal crop is a
    # slice scaled by the number of planes (channels) per pixel.
    planes = image.info["planes"]
    lo = left * planes
    hi = right * planes

    def cropped_rows():
        """Lazily yield the windowed slice of each row in [top, bottom)."""
        for y, row in enumerate(image.rows):
            if y < top:
                continue
            if y >= bottom:
                return
            yield row[lo:hi]

    new_info = dict(image.info, size=(right - left, bottom - top))
    return png.Image(cropped_rows(), new_info)
| mit | c953d23198a453ffca8a5f9136e1f887 | 25.560606 | 65 | 0.608671 | 3.957111 | false | false | false | false |
psf/black | src/black/mode.py | 1 | 6841 | """Data structures configuring Black behavior.
Mostly around Python language feature support per version and Black configuration
chosen by the user.
"""
import sys
from dataclasses import dataclass, field
from enum import Enum, auto
from hashlib import sha256
from operator import attrgetter
from typing import Dict, Set
from warnings import warn
if sys.version_info < (3, 8):
from typing_extensions import Final
else:
from typing import Final
from black.const import DEFAULT_LINE_LENGTH
class TargetVersion(Enum):
    # Python versions that can be targeted for formatting.  The numeric
    # value encodes the minor version (PY33 -> 3.3 ... PY311 -> 3.11),
    # which also gives a total ordering from oldest to newest.
    PY33 = 3
    PY34 = 4
    PY35 = 5
    PY36 = 6
    PY37 = 7
    PY38 = 8
    PY39 = 9
    PY310 = 10
    PY311 = 11
class Feature(Enum):
    # Individual language features whose availability depends on the
    # selected target versions (see VERSION_TO_FEATURES below).
    F_STRINGS = 2
    NUMERIC_UNDERSCORES = 3
    TRAILING_COMMA_IN_CALL = 4
    TRAILING_COMMA_IN_DEF = 5
    # The following two feature-flags are mutually exclusive, and exactly one should be
    # set for every version of python.
    ASYNC_IDENTIFIERS = 6
    ASYNC_KEYWORDS = 7
    ASSIGNMENT_EXPRESSIONS = 8
    POS_ONLY_ARGUMENTS = 9
    RELAXED_DECORATORS = 10
    PATTERN_MATCHING = 11
    UNPACKING_ON_FLOW = 12
    ANN_ASSIGN_EXTENDED_RHS = 13
    EXCEPT_STAR = 14
    VARIADIC_GENERICS = 15
    DEBUG_F_STRINGS = 16
    FORCE_OPTIONAL_PARENTHESES = 50

    # __future__ flags
    FUTURE_ANNOTATIONS = 51
# Maps `from __future__ import <flag>` names to the Feature they enable.
FUTURE_FLAG_TO_FEATURE: Final = {
    "annotations": Feature.FUTURE_ANNOTATIONS,
}


# For each target version, the full set of Features it supports.  Each
# newer version repeats its predecessor's set plus additions (the sets
# are spelled out explicitly rather than derived).
VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = {
    TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY36: {
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_IDENTIFIERS,
    },
    TargetVersion.PY37: {
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
    },
    TargetVersion.PY38: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
    },
    TargetVersion.PY39: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
    },
    TargetVersion.PY310: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PATTERN_MATCHING,
    },
    TargetVersion.PY311: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PATTERN_MATCHING,
        Feature.EXCEPT_STAR,
        Feature.VARIADIC_GENERICS,
    },
}
def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool:
    """Return True only if *feature* is available in every target version.

    (Vacuously True for an empty set of target versions.)
    """
    for version in target_versions:
        if feature not in VERSION_TO_FEATURES[version]:
            return False
    return True
class Preview(Enum):
    """Individual preview style features."""

    # Each member names one style change gated behind the preview flag.
    # Membership is tested with `Preview.<feature> in mode`, which simply
    # mirrors Mode.preview; the members exist to clarify intent at call sites.
    annotation_parens = auto()
    empty_lines_before_class_or_def_with_leading_comments = auto()
    handle_trailing_commas_in_head = auto()
    long_docstring_quotes_on_newline = auto()
    normalize_docstring_quotes_and_prefixes_properly = auto()
    one_element_subscript = auto()
    remove_block_trailing_newline = auto()
    remove_redundant_parens = auto()
    string_processing = auto()
    skip_magic_trailing_comma_in_subscript = auto()
class Deprecated(UserWarning):
    """Visible deprecation warning."""

    # Subclasses UserWarning rather than DeprecationWarning so that the
    # default warning filters actually display it to end users.
@dataclass
class Mode:
    """Formatting configuration chosen by the user (plus defaults)."""

    # Python versions the output must be valid for; empty means autodetect.
    target_versions: Set[TargetVersion] = field(default_factory=set)
    line_length: int = DEFAULT_LINE_LENGTH
    string_normalization: bool = True
    is_pyi: bool = False
    is_ipynb: bool = False
    skip_source_first_line: bool = False
    magic_trailing_comma: bool = True
    # Deprecated; superseded by `preview` (see __post_init__).
    experimental_string_processing: bool = False
    python_cell_magics: Set[str] = field(default_factory=set)
    preview: bool = False

    def __post_init__(self) -> None:
        # Warn (but keep honoring) the deprecated string-processing flag.
        if self.experimental_string_processing:
            warn(
                (
                    "`experimental string processing` has been included in `preview`"
                    " and deprecated. Use `preview` instead."
                ),
                Deprecated,
            )

    def __contains__(self, feature: Preview) -> bool:
        """
        Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag.

        The argument is not checked and features are not differentiated.
        They only exist to make development easier by clarifying intent.
        """
        # string_processing is the one feature also reachable via the
        # deprecated experimental_string_processing flag.
        if feature is Preview.string_processing:
            return self.preview or self.experimental_string_processing
        return self.preview

    def get_cache_key(self) -> str:
        # Serialize every option that affects output into a stable dotted
        # string, so the formatting cache is invalidated whenever the
        # configuration changes.  The field order here must stay stable.
        if self.target_versions:
            version_str = ",".join(
                str(version.value)
                for version in sorted(self.target_versions, key=attrgetter("value"))
            )
        else:
            version_str = "-"
        parts = [
            version_str,
            str(self.line_length),
            str(int(self.string_normalization)),
            str(int(self.is_pyi)),
            str(int(self.is_ipynb)),
            str(int(self.skip_source_first_line)),
            str(int(self.magic_trailing_comma)),
            str(int(self.experimental_string_processing)),
            str(int(self.preview)),
            # Cell magics are an unbounded set, so hash them instead of
            # embedding them verbatim.
            sha256((",".join(sorted(self.python_cell_magics))).encode()).hexdigest(),
        ]
        return ".".join(parts)
| mit | 3ced27505a4512925c18f40cedac8441 | 29.815315 | 87 | 0.640257 | 3.65438 | false | false | false | false |
psf/black | tests/data/simple_cases/fmtonoff5.py | 2 | 3319 | # Regression test for https://github.com/psf/black/issues/3129.
setup(
entry_points={
# fmt: off
"console_scripts": [
"foo-bar"
"=foo.bar.:main",
# fmt: on
] # Includes an formatted indentation.
},
)
# Regression test for https://github.com/psf/black/issues/2015.
run(
# fmt: off
[
"ls",
"-la",
]
# fmt: on
+ path,
check=True,
)
# Regression test for https://github.com/psf/black/issues/3026.
def test_func():
# yapf: disable
if unformatted( args ):
return True
# yapf: enable
elif b:
return True
return False
# Regression test for https://github.com/psf/black/issues/2567.
if True:
# fmt: off
for _ in range( 1 ):
# fmt: on
print ( "This won't be formatted" )
print ( "This won't be formatted either" )
else:
print ( "This will be formatted" )
# Regression test for https://github.com/psf/black/issues/3184.
class A:
async def call(param):
if param:
# fmt: off
if param[0:4] in (
"ABCD", "EFGH"
) :
# fmt: on
print ( "This won't be formatted" )
elif param[0:4] in ("ZZZZ",):
print ( "This won't be formatted either" )
print ( "This will be formatted" )
# Regression test for https://github.com/psf/black/issues/2985
class Named(t.Protocol):
# fmt: off
@property
def this_wont_be_formatted ( self ) -> str: ...
class Factory(t.Protocol):
def this_will_be_formatted ( self, **kwargs ) -> Named: ...
# fmt: on
# output
# Regression test for https://github.com/psf/black/issues/3129.
setup(
entry_points={
# fmt: off
"console_scripts": [
"foo-bar"
"=foo.bar.:main",
# fmt: on
] # Includes an formatted indentation.
},
)
# Regression test for https://github.com/psf/black/issues/2015.
run(
# fmt: off
[
"ls",
"-la",
]
# fmt: on
+ path,
check=True,
)
# Regression test for https://github.com/psf/black/issues/3026.
def test_func():
# yapf: disable
if unformatted( args ):
return True
# yapf: enable
elif b:
return True
return False
# Regression test for https://github.com/psf/black/issues/2567.
if True:
# fmt: off
for _ in range( 1 ):
# fmt: on
print ( "This won't be formatted" )
print ( "This won't be formatted either" )
else:
print("This will be formatted")
# Regression test for https://github.com/psf/black/issues/3184.
class A:
async def call(param):
if param:
# fmt: off
if param[0:4] in (
"ABCD", "EFGH"
) :
# fmt: on
print ( "This won't be formatted" )
elif param[0:4] in ("ZZZZ",):
print ( "This won't be formatted either" )
print("This will be formatted")
# Regression test for https://github.com/psf/black/issues/2985
class Named(t.Protocol):
# fmt: off
@property
def this_wont_be_formatted ( self ) -> str: ...
class Factory(t.Protocol):
def this_will_be_formatted(self, **kwargs) -> Named:
...
# fmt: on
| mit | 45b87ba650505b8353cec6a048c8b28a | 20.006329 | 64 | 0.536306 | 3.446521 | false | true | false | false |
kaste/mockito-python | mockito/spying.py | 1 | 3115 | # Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''Spying on real objects.'''
import inspect
from .mockito import when2
from .invocation import RememberedProxyInvocation
from .mocking import Mock, _Dummy, mock_registry
__all__ = ['spy']
def spy(object):
    """Spy an object.

    Spying means that all functions will behave as before, so they will
    be side effects, but the interactions can be verified afterwards.

    Returns Dummy-like, almost empty object as proxy to `object`.

    The *returned* object must be injected and used by the code under test;
    after that all interactions can be verified as usual.
    T.i. the original object **will not be patched**, and has no further
    knowledge as before.

    E.g.::

        import time
        time = spy(time)
        # inject time
        do_work(..., time)
        verify(time).time()

    """
    # For classes and modules there is no meaningful instance class to
    # impersonate; only instances get their class spoofed below.
    if inspect.isclass(object) or inspect.ismodule(object):
        class_ = None
    else:
        class_ = object.__class__

    class Spy(_Dummy):
        if class_:
            # Make the proxy report the spied instance's class so that
            # isinstance()-style checks in the code under test still pass.
            __class__ = class_

        def __getattr__(self, method_name):
            # Every attribute access becomes a recording proxy invocation:
            # the call is remembered for verification and forwarded to the
            # real object.  Note `theMock` is resolved late (closure), after
            # it is assigned below.
            return RememberedProxyInvocation(theMock, method_name)

        def __repr__(self):
            name = 'Spied'
            if class_:
                name += class_.__name__
            return "<%s id=%s>" % (name, id(self))

    obj = Spy()
    # strict=True with spec=object: only attributes the spied object really
    # has may be used.
    theMock = Mock(obj, strict=True, spec=object)
    # Register so verify()/unstub() can find the mock for this proxy.
    mock_registry.register(obj, theMock)
    return obj
def spy2(fn):  # type: (...) -> None
    """Spy usage of given `fn`.

    Patches the module, class or object `fn` lives in, so that all
    interactions can be recorded; otherwise executes `fn` as before, so
    that all side effects happen as before.

    E.g.::

        import time
        spy2(time.time)
        do_work(...)  # nothing injected, uses global patched `time` module
        verify(time).time()

    Note that builtins often cannot be patched because they're read-only.

    """
    # Stub `fn` for any arguments (Ellipsis) but keep delegating to the
    # original implementation, so calls are recorded without changing
    # behavior.
    when2(fn, Ellipsis).thenCallOriginalImplementation()
| mit | e7efde0fe2da4ce7c7a397bf7d73affa | 30.785714 | 79 | 0.673836 | 4.071895 | false | false | false | false |
pydata/numexpr | numexpr/__init__.py | 1 | 2262 | ###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
"""
Numexpr is a fast numerical expression evaluator for NumPy. With it,
expressions that operate on arrays (like "3*a+4*b") are accelerated
and use less memory than doing the same calculation in Python.
See:
https://github.com/pydata/numexpr
for more info about it.
"""
from numexpr.interpreter import MAX_THREADS, use_vml, __BLOCK_SIZE1__
is_cpu_amd_intel = False # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE
# cpuinfo imports were moved into the test submodule function that calls them
# to improve import times.
import os, os.path
import platform
from numexpr.expressions import E
from numexpr.necompiler import NumExpr, disassemble, evaluate, re_evaluate
from numexpr.utils import (_init_num_threads,
get_vml_version, set_vml_accuracy_mode, set_vml_num_threads,
set_num_threads, get_num_threads,
detect_number_of_cores, detect_number_of_threads)
# Detect the number of cores available on this machine.
ncores = detect_number_of_cores()
# Initialize the number of threads to be used by the virtual machine.
nthreads = _init_num_threads()

# The default for VML is 1 thread (see #39)
# set_vml_num_threads(1)

from . import version

__version__ = version.version
def print_versions():
    """Print the versions of software that numexpr relies on."""
    # Only guard the import itself: the previous broad try/except also
    # caught ImportErrors raised *inside* tests.print_versions() and
    # replaced them with a misleading "could not be imported" message.
    try:
        import numexpr.tests
    except ImportError:
        # To maintain Python 2.6 compatibility we have simple error handling
        raise ImportError('`numexpr.tests` could not be imported, likely it was excluded from the distribution.')
    return numexpr.tests.print_versions()
def test(verbosity=1):
    """Run all the tests in the test suite."""
    # Only guard the import itself: the previous broad try/except also
    # caught ImportErrors raised *inside* tests.test() and replaced them
    # with a misleading "could not be imported" message.
    try:
        import numexpr.tests
    except ImportError:
        # To maintain Python 2.6 compatibility we have simple error handling
        raise ImportError('`numexpr.tests` could not be imported, likely it was excluded from the distribution.')
    return numexpr.tests.test(verbosity=verbosity)
kalaspuff/tomodachi | tomodachi/transport/schedule.py | 1 | 28392 | import asyncio
import datetime
import inspect
import logging
import random
import time
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import pytz
import tzlocal
from tomodachi.helpers.crontab import get_next_datetime
from tomodachi.helpers.execution_context import (
decrease_execution_context_value,
increase_execution_context_value,
set_execution_context,
)
from tomodachi.invoker import Invoker
class Scheduler(Invoker):
close_waiter: Optional[asyncio.Future] = None
    @classmethod
    async def schedule_handler(
        cls,
        obj: Any,
        context: Dict,
        func: Any,
        interval: Optional[Union[str, int]] = None,
        timestamp: Optional[str] = None,
        timezone: Optional[str] = None,
        immediately: Optional[bool] = False,
    ) -> Any:
        """Register `func` as a scheduled function on the service `obj`.

        Builds a crash-proof wrapper around `func`, records the schedule
        spec in `context`, and then starts (or joins) the scheduler.
        """
        # Capture `func`'s keyword defaults once, so each invocation gets a
        # fresh copy (the defaults map positionally to the trailing args).
        values = inspect.getfullargspec(func)
        original_kwargs = (
            {k: values.defaults[i] for i, k in enumerate(values.args[len(values.args) - len(values.defaults) :])}
            if values.defaults
            else {}
        )

        async def handler() -> None:
            # Wrapper executed at each scheduled tick.  Tracks in-flight /
            # total counters and swallows *all* exceptions (logging them)
            # so one failing run never kills the schedule loop.
            increase_execution_context_value("scheduled_functions_current_tasks")
            try:
                kwargs = dict(original_kwargs)
                increase_execution_context_value("scheduled_functions_total_tasks")
                routine = func(*(obj,), **kwargs)
                # `func` may be sync or async; await only if needed.
                if inspect.isawaitable(routine):
                    await routine
            except (Exception, asyncio.CancelledError) as e:
                logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
            except BaseException as e:
                logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
            finally:
                decrease_execution_context_value("scheduled_functions_current_tasks")

        # Queue the schedule spec; start_scheduler consumes this list.
        context["_schedule_scheduled_functions"] = context.get("_schedule_scheduled_functions", [])
        context["_schedule_scheduled_functions"].append((interval, timestamp, timezone, immediately, func, handler))

        start_func = cls.start_scheduler(obj, context)
        return (await start_func) if start_func else None
    @classmethod
    def schedule_handler_with_interval(cls, interval: Union[str, int]) -> Callable:
        """Return an invoker function with `interval` pre-bound.

        The returned `_func` forwards to schedule_handler; its first
        positional argument (the decorator's own cls/self slot) is ignored.
        """
        def _func(_: Any, obj: Any, context: Dict, func: Any) -> Any:
            return cls.schedule_handler(obj, context, func, interval=interval)

        return _func
    @staticmethod
    def next_call_at(
        current_time: float,
        interval: Optional[Union[str, int]] = None,
        timestamp: Optional[str] = None,
        timezone: Optional[str] = None,
    ) -> int:
        """Compute the epoch second of the next run after `current_time`.

        Exactly one of `interval` (seconds, alias string, or crontab
        expression) or `timestamp` (one of four supported formats) should
        be given.  Far-future sentinels (~100 years ahead) are returned
        when a schedule can never fire again.
        """
        # Resolve the schedule's timezone (falling back to the host's).
        if not timezone:
            tz = tzlocal.get_localzone()
        else:
            try:
                tz = pytz.timezone(timezone or "")
            except Exception as e:
                raise Exception("Unknown timezone: {}".format(timezone)) from e
        local_tz = tzlocal.get_localzone()
        if interval is None and timestamp is not None:
            if isinstance(timestamp, str):
                # Try the four accepted timestamp formats in order, from
                # most to least specific; each parse failure falls through
                # to the next format.
                try:
                    # One-shot "YYYY-MM-DD HH:MM:SS": build a cron expression
                    # pinned to that exact minute (and year).
                    datetime_object = datetime.datetime.strptime(timestamp, "%Y-%m-%d %H:%M:%S")
                    interval = "{} {} {} {} * {}".format(
                        datetime_object.minute,
                        datetime_object.hour,
                        datetime_object.day,
                        datetime_object.month,
                        datetime_object.year,
                    )
                    # If the target is still in the future, rewind one minute
                    # so the cron match can land on the target minute itself;
                    # seconds are re-added after the minute-granular match.
                    second_modifier = 1
                    if datetime_object.replace(tzinfo=tz) > datetime.datetime.fromtimestamp(current_time).replace(
                        tzinfo=local_tz
                    ):
                        second_modifier = -60
                    next_at = get_next_datetime(
                        interval,
                        datetime.datetime.fromtimestamp(current_time + second_modifier)
                        .replace(tzinfo=local_tz)
                        .astimezone(tz),
                    )
                    if not next_at:
                        # No future match: return "never" (~100 years ahead).
                        return int(current_time + 60 * 60 * 24 * 365 * 100)
                    return int(next_at.timestamp() + datetime_object.second)
                except ValueError:
                    pass
                try:
                    # One-shot "YYYY-MM-DD HH:MM" (minute precision).
                    datetime_object = datetime.datetime.strptime(timestamp, "%Y-%m-%d %H:%M")
                    interval = "{} {} {} {} * {}".format(
                        datetime_object.minute,
                        datetime_object.hour,
                        datetime_object.day,
                        datetime_object.month,
                        datetime_object.year,
                    )
                    next_at = get_next_datetime(
                        interval,
                        datetime.datetime.fromtimestamp(current_time + 1).replace(tzinfo=local_tz).astimezone(tz),
                    )
                    if not next_at:
                        return int(current_time + 60 * 60 * 24 * 365 * 100)
                    return int(next_at.timestamp())
                except ValueError:
                    pass
                try:
                    # Daily "HH:MM:SS": anchor the time-of-day to today's date.
                    datetime_object = datetime.datetime.strptime(timestamp, "%H:%M:%S")
                    datetime_object = datetime.datetime(
                        datetime.datetime.fromtimestamp(current_time).year,
                        datetime.datetime.fromtimestamp(current_time).month,
                        datetime.datetime.fromtimestamp(current_time).day,
                        datetime_object.hour,
                        datetime_object.minute,
                        datetime_object.second,
                    )
                    interval = "{} {} * * *".format(datetime_object.minute, datetime_object.hour)
                    # Same minute-rewind trick as the full-timestamp branch.
                    second_modifier = 1
                    if datetime_object.replace(tzinfo=tz) > datetime.datetime.fromtimestamp(current_time).replace(
                        tzinfo=local_tz
                    ):
                        second_modifier = -60
                    next_at = get_next_datetime(
                        interval,
                        datetime.datetime.fromtimestamp(current_time + second_modifier)
                        .replace(tzinfo=local_tz)
                        .astimezone(tz),
                    )
                    if not next_at:
                        return int(current_time + 60 * 60 * 24 * 365 * 100)
                    return int(next_at.timestamp() + datetime_object.second)
                except ValueError:
                    pass
                try:
                    # Daily "HH:MM" (minute precision).
                    datetime_object = datetime.datetime.strptime(timestamp, "%H:%M")
                    interval = "{} {} * * *".format(datetime_object.minute, datetime_object.hour)
                    next_at = get_next_datetime(
                        interval,
                        datetime.datetime.fromtimestamp(current_time + 1).replace(tzinfo=local_tz).astimezone(tz),
                    )
                    if not next_at:
                        return int(current_time + 60 * 60 * 24 * 365 * 100)
                    return int(next_at.timestamp())
                except ValueError:
                    pass
                raise Exception("Invalid timestamp")
        if interval is not None:
            # Plain integer: simple fixed offset in seconds.
            if isinstance(interval, int):
                return int(current_time + interval)

            # Human-friendly aliases mapping to either a number of seconds
            # or a crontab expression.
            interval_aliases: Dict[Tuple[str, ...], Union[str, int]] = {
                ("every second", "1s", "1 s", "1second", "1 second", "second", "secondly", "once per second"): 1,
                (
                    "every minute",
                    "1m",
                    "1 m",
                    "1minute",
                    "1 minute",
                    "minute",
                    "minutely",
                    "once per minute",
                ): "@minutely",
                ("every hour", "1h", "1 h", "1hour", "1 hour", "hour", "hourly", "once per hour"): "@hourly",
                ("every day", "1d", "1 d", "1day", "1 day", "day", "daily", "once per day", "nightly"): "@daily",
                ("every month", "1month", "1 month", "month", "monthly", "once per month"): "@monthly",
                (
                    "every year",
                    "1y",
                    "1 y",
                    "1year",
                    "1 year",
                    "year",
                    "yearly",
                    "once per year",
                    "annually",
                ): "@yearly",
                (
                    "monday",
                    "mondays",
                    "mon",
                    "every monday",
                    "once per monday",
                    "weekly",
                    "once per week",
                    "week",
                    "every week",
                ): "0 0 * * 1",
                ("tuesday", "tuesdays", "tue", "every tuesday", "once per tuesday"): "0 0 * * 2",
                ("wednesday", "wednesdays", "wed", "every wednesday", "once per wednesday"): "0 0 * * 3",
                ("thursday", "thursdays", "thu", "every thursday", "once per thursday"): "0 0 * * 4",
                ("friday", "fridays", "fri", "every friday", "once per friday"): "0 0 * * 5",
                ("saturday", "saturdays", "sat", "every saturday", "once per saturday"): "0 0 * * 6",
                ("sunday", "sundays", "sun", "every sunday", "once per sunday"): "0 0 * * 0",
                ("weekday", "weekdays", "every weekday"): "0 0 * * 1-5",
                ("weekend", "weekends", "every weekend"): "0 0 * * 0,6",
            }
            interval = interval.lower()
            # "Ns" / "N seconds" shorthand -> integer seconds.
            if interval.endswith("s") or interval.endswith("seconds"):
                try:
                    interval = int(interval.replace("seconds", "").replace("s", "").replace(" ", ""))
                except ValueError:
                    pass
            try:
                interval_value: Union[str, int] = [v for k, v in interval_aliases.items() if interval in k][0]
            except IndexError:
                # Not an alias: treat as a raw crontab expression below.
                interval_value = interval
            if isinstance(interval_value, int):
                return int(current_time + interval_value)
            try:
                next_at = get_next_datetime(
                    interval_value,
                    datetime.datetime.fromtimestamp(current_time + 1).replace(tzinfo=local_tz).astimezone(tz),
                )
                if not next_at:
                    return int(current_time + 60 * 60 * 24 * 365 * 100)
                return int(next_at.timestamp())
            except Exception:
                raise Exception("Invalid interval")
        # Neither interval nor timestamp given: effectively "never".
        return int(current_time + 60 * 60 * 24 * 365 * 100)
    @staticmethod
    def get_timezone(timezone: Optional[str] = None) -> Optional[str]:
        """Normalize and validate a timezone string.

        Accepts common UTC-offset spellings and maps them onto POSIX
        Etc/GMT zones.  Note the deliberately inverted sign convention of
        Etc/GMT zones: "+01:00" maps to "Etc/GMT-1", "-01:00" to
        "Etc/GMT+1".  Raises on anything pytz does not recognize; returns
        the (possibly rewritten) timezone name, or None if none given.
        """
        if timezone:
            tz_aliases: Dict[Tuple[str, ...], str] = {
                (
                    "+00:00",
                    "-00:00",
                    "00:00",
                    "0000",
                    "GMT +0000",
                    "GMT +00:00",
                    "GMT -00",
                    "GMT +00",
                    "GMT -0",
                    "GMT +0",
                ): "GMT0",
                ("+01:00", "+0100", "GMT +0100", "GMT +01:00", "GMT +01", "GMT +1"): "Etc/GMT-1",
                ("+02:00", "+0200", "GMT +0200", "GMT +02:00", "GMT +02", "GMT +2"): "Etc/GMT-2",
                ("+03:00", "+0300", "GMT +0300", "GMT +03:00", "GMT +03", "GMT +3"): "Etc/GMT-3",
                ("+04:00", "+0400", "GMT +0400", "GMT +04:00", "GMT +04", "GMT +4"): "Etc/GMT-4",
                ("+05:00", "+0500", "GMT +0500", "GMT +05:00", "GMT +05", "GMT +5"): "Etc/GMT-5",
                ("+06:00", "+0600", "GMT +0600", "GMT +06:00", "GMT +06", "GMT +6"): "Etc/GMT-6",
                ("+07:00", "+0700", "GMT +0700", "GMT +07:00", "GMT +07", "GMT +7"): "Etc/GMT-7",
                ("+08:00", "+0800", "GMT +0800", "GMT +08:00", "GMT +08", "GMT +8"): "Etc/GMT-8",
                ("+09:00", "+0900", "GMT +0900", "GMT +09:00", "GMT +09", "GMT +9"): "Etc/GMT-9",
                ("+10:00", "+1000", "GMT +1000", "GMT +10:00", "GMT +10"): "Etc/GMT-10",
                ("+11:00", "+1100", "GMT +1100", "GMT +11:00", "GMT +11"): "Etc/GMT-11",
                ("+12:00", "+1200", "GMT +1200", "GMT +12:00", "GMT +12"): "Etc/GMT-12",
                ("-01:00", "-0100", "GMT -0100", "GMT -01:00", "GMT -01", "GMT -1"): "Etc/GMT+1",
                ("-02:00", "-0200", "GMT -0200", "GMT -02:00", "GMT -02", "GMT -2"): "Etc/GMT+2",
                ("-03:00", "-0300", "GMT -0300", "GMT -03:00", "GMT -03", "GMT -3"): "Etc/GMT+3",
                ("-04:00", "-0400", "GMT -0400", "GMT -04:00", "GMT -04", "GMT -4"): "Etc/GMT+4",
                ("-05:00", "-0500", "GMT -0500", "GMT -05:00", "GMT -05", "GMT -5"): "Etc/GMT+5",
                ("-06:00", "-0600", "GMT -0600", "GMT -06:00", "GMT -06", "GMT -6"): "Etc/GMT+6",
                ("-07:00", "-0700", "GMT -0700", "GMT -07:00", "GMT -07", "GMT -7"): "Etc/GMT+7",
                ("-08:00", "-0800", "GMT -0800", "GMT -08:00", "GMT -08", "GMT -8"): "Etc/GMT+8",
                ("-09:00", "-0900", "GMT -0900", "GMT -09:00", "GMT -09", "GMT -9"): "Etc/GMT+9",
                ("-10:00", "-1000", "GMT -1000", "GMT -10:00", "GMT -10"): "Etc/GMT+10",
                ("-11:00", "-1100", "GMT -1100", "GMT -11:00", "GMT -11"): "Etc/GMT+11",
                ("-12:00", "-1200", "GMT -1200", "GMT -12:00", "GMT -12"): "Etc/GMT+12",
            }
            try:
                try:
                    # Match the alias table both verbatim and with all
                    # whitespace stripped (so "GMT+1" matches "GMT +1").
                    timezone = [
                        v
                        for k, v in tz_aliases.items()
                        if timezone in k or timezone.replace(" ", "") in [x.replace(" ", "") for x in k]
                    ][0]
                except IndexError:
                    pass
                # Final validation: must be a name pytz understands.
                pytz.timezone(timezone or "")
            except Exception as e:
                raise Exception("Unknown timezone: {}".format(timezone)) from e
        return timezone
    @classmethod
    async def start_schedule_loop(
        cls,
        obj: Any,
        context: Dict,
        handler: Callable,
        func: Callable,
        interval: Optional[Union[str, int]] = None,
        timestamp: Optional[str] = None,
        timezone: Optional[str] = None,
        immediately: Optional[bool] = False,
    ) -> None:
        """Start the background loop that repeatedly invokes ``handler`` for one
        scheduled function (``func``) according to ``interval`` / ``timestamp``
        interpreted in ``timezone``. Also installs chained ``_stop_service`` /
        ``_started_service`` hooks on ``obj`` so the loop only runs after service
        startup completes and drains outstanding tasks on shutdown.
        """
        timezone = cls.get_timezone(timezone)
        # close_waiter is shared by all schedule loops; resolving it signals shutdown.
        if not cls.close_waiter:
            cls.close_waiter = asyncio.Future()
        stop_waiter: asyncio.Future = asyncio.Future()
        start_waiter: asyncio.Future = asyncio.Future()
        async def schedule_loop() -> None:
            sleep_task: asyncio.Future
            current_time = time.time()
            # Upper bound (seconds) for a single sleep between schedule checks.
            max_sleep_time = 300
            try:
                # Wait (10s, then another 110s, warning each time) for service
                # startup to complete before computing the first call time.
                sleep_task = asyncio.ensure_future(asyncio.sleep(10))
                await asyncio.wait([sleep_task, start_waiter], return_when=asyncio.FIRST_COMPLETED)
                if not sleep_task.done():
                    sleep_task.cancel()
                else:
                    logging.getLogger("transport.schedule").warning(
                        "Scheduled loop for function '{}' cannot start yet - start waiter not done for 10 seconds".format(
                            func.__name__
                        )
                    )
                    sleep_task = asyncio.ensure_future(asyncio.sleep(110))
                    await asyncio.wait([sleep_task, start_waiter], return_when=asyncio.FIRST_COMPLETED)
                    if not sleep_task.done():
                        sleep_task.cancel()
                    else:
                        logging.getLogger("transport.schedule").warning(
                            "Scheduled loop for function '{}' cannot start yet - start waiter not done for 120 seconds".format(
                                func.__name__
                            )
                        )
                        logging.getLogger("exception").exception("Scheduled loop not started for 120 seconds")
                await asyncio.sleep(0.1)
                await start_waiter
                if not cls.close_waiter or cls.close_waiter.done():
                    logging.getLogger("transport.schedule").info(
                        "Scheduled loop for function '{}' never started before service termination".format(
                            func.__name__
                        )
                    )
                else:
                    # Probe consecutive call times to derive a sensible max sleep:
                    # roughly half the observed gap, clamped to [10, 300] seconds.
                    ts0 = cls.next_call_at(current_time, interval, timestamp, timezone)
                    for _ in range(10):
                        ts1 = cls.next_call_at(ts0, interval, timestamp, timezone)
                        ts2 = cls.next_call_at(ts1, interval, timestamp, timezone)
                        if int(ts2 - ts1) // 2 < max_sleep_time:
                            max_sleep_time = int(ts2 - ts1) // 2
                    max_sleep_time = min(max(max_sleep_time, 10), 300)
            except (Exception, asyncio.CancelledError) as e:
                logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
                await asyncio.sleep(0.1)
                await start_waiter
                if not cls.close_waiter or cls.close_waiter.done():
                    logging.getLogger("transport.schedule").info(
                        "Scheduled loop for function '{}' never started before service termination".format(
                            func.__name__
                        )
                    )
            next_call_at: Optional[int] = None
            prev_call_at: Optional[int] = None
            tasks: List = []
            too_many_tasks = False
            threshold = 20
            run_immediately = immediately
            # Main loop: runs until close_waiter resolves at service shutdown.
            while cls.close_waiter and not cls.close_waiter.done():
                try:
                    if not run_immediately:
                        last_time = current_time
                        actual_time = time.time()
                        # Advance the loop clock at most 1s per iteration to catch up smoothly.
                        current_time = last_time + 1 if int(last_time + 1) < int(actual_time) else actual_time
                        if next_call_at is None:
                            next_call_at = cls.next_call_at(current_time, interval, timestamp, timezone)
                            if prev_call_at and prev_call_at == next_call_at:
                                # Same call time computed twice in a row - clock/sync drift guard.
                                if int(last_time + 60) < int(actual_time):
                                    logging.getLogger("transport.schedule").warning(
                                        "Scheduled tasks for function '{}' is out of time sync and may not run".format(
                                            func.__name__
                                        )
                                    )
                                    logging.getLogger("exception").exception("Scheduled task loop out of sync")
                                next_call_at = None
                                await asyncio.sleep(1)
                                continue
                        sleep_diff = int(current_time + 1) - actual_time + 0.001
                        if next_call_at > time.time() + 8:
                            # Far from the next call: sleep in larger chunks.
                            sleep_diff = int((next_call_at - time.time()) / 3)
                            if sleep_diff >= max_sleep_time:
                                # Jitter the capped sleep to avoid synchronized wakeups.
                                sleep_diff = int(max_sleep_time - random.random() * 5)
                        if sleep_diff >= 2:
                            # Long sleeps race against close_waiter so shutdown is prompt.
                            sleep_task = asyncio.ensure_future(asyncio.sleep(sleep_diff))
                            await asyncio.wait([sleep_task, cls.close_waiter], return_when=asyncio.FIRST_COMPLETED)
                            if not sleep_task.done():
                                sleep_task.cancel()
                            current_time = time.time()
                        else:
                            await asyncio.sleep(sleep_diff)
                        if next_call_at > time.time():
                            continue
                    run_immediately = False
                    if cls.close_waiter.done():
                        continue
                    prev_call_at = next_call_at
                    next_call_at = None
                    # Drop references to finished task objects.
                    tasks = [task for task in tasks if not task.done()]
                    if len(tasks) >= 20:
                        # Backpressure: pause scheduling while too many handler
                        # invocations are still running.
                        if not too_many_tasks and len(tasks) >= threshold:
                            too_many_tasks = True
                            logging.getLogger("transport.schedule").warning(
                                "Too many scheduled tasks ({}) for function '{}'".format(threshold, func.__name__)
                            )
                            # Double the threshold so the warning is not repeated every tick.
                            threshold = threshold * 2
                        await asyncio.sleep(1)
                        current_time = time.time()
                        next_call_at = cls.next_call_at(current_time + 10, interval, timestamp, timezone)
                        continue
                    if too_many_tasks and len(tasks) >= 15:
                        await asyncio.sleep(1)
                        current_time = time.time()
                        next_call_at = cls.next_call_at(current_time + 10, interval, timestamp, timezone)
                        continue
                    if too_many_tasks and len(tasks) < 15:
                        logging.getLogger("transport.schedule").info(
                            "Tasks within threshold for function '{}' - resumed".format(func.__name__)
                        )
                        threshold = 20
                        too_many_tasks = False
                    current_time = time.time()
                    task = asyncio.ensure_future(handler())
                    # Task.set_name is not available on all supported Python
                    # versions - hence the hasattr guard.
                    if hasattr(task, "set_name"):
                        getattr(task, "set_name")(
                            "{} : {}".format(
                                func.__name__, datetime.datetime.utcfromtimestamp(current_time).isoformat()
                            )
                        )
                    tasks.append(task)
                except (Exception, asyncio.CancelledError) as e:
                    logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
                    await asyncio.sleep(1)
                except BaseException as e:
                    logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
                    await asyncio.sleep(1)
            # Shutdown: wait for any still-running handler tasks to finish,
            # logging which ones we are still waiting on every 10 seconds.
            if tasks:
                task_waiter = asyncio.ensure_future(asyncio.wait(tasks))
                sleep_task = asyncio.ensure_future(asyncio.sleep(2))
                await asyncio.wait([sleep_task, task_waiter], return_when=asyncio.FIRST_COMPLETED)
                if not sleep_task.done():
                    sleep_task.cancel()
                for task in tasks:
                    if task.done():
                        continue
                    task_name = getattr(task, "get_name")() if hasattr(task, "get_name") else func.__name__
                    logging.getLogger("transport.schedule").warning(
                        "Awaiting task '{}' to finish execution".format(task_name)
                    )
                while not task_waiter.done():
                    sleep_task = asyncio.ensure_future(asyncio.sleep(10))
                    await asyncio.wait([sleep_task, task_waiter], return_when=asyncio.FIRST_COMPLETED)
                    if not sleep_task.done():
                        sleep_task.cancel()
                    for task in tasks:
                        if task.done():
                            continue
                        task_name = getattr(task, "get_name")() if hasattr(task, "get_name") else func.__name__
                        logging.getLogger("transport.schedule").warning(
                            "Still awaiting task '{}' to finish execution".format(task_name)
                        )
            if not stop_waiter.done():
                stop_waiter.set_result(None)
        loop: Any = asyncio.get_event_loop()
        stop_method = getattr(obj, "_stop_service", None)
        async def stop_service(*args: Any, **kwargs: Any) -> None:
            # Resolve the waiters so schedule_loop exits, then chain any prior hook.
            if cls.close_waiter and not cls.close_waiter.done():
                cls.close_waiter.set_result(None)
                if not start_waiter.done():
                    start_waiter.set_result(None)
                await stop_waiter
                if stop_method:
                    await stop_method(*args, **kwargs)
            else:
                await stop_waiter
                if stop_method:
                    await stop_method(*args, **kwargs)
        setattr(obj, "_stop_service", stop_service)
        started_method = getattr(obj, "_started_service", None)
        async def started_service(*args: Any, **kwargs: Any) -> None:
            # Chain any prior hook, then release start_waiter to let the loop run.
            if started_method:
                await started_method(*args, **kwargs)
            if not start_waiter.done():
                start_waiter.set_result(None)
        setattr(obj, "_started_service", started_service)
        loop.create_task(schedule_loop())
@classmethod
async def start_scheduler(cls, obj: Any, context: Dict) -> Optional[Callable]:
if context.get("_schedule_loop_started"):
return None
context["_schedule_loop_started"] = True
set_execution_context(
{
"scheduled_functions_enabled": True,
"scheduled_functions_current_tasks": 0,
"scheduled_functions_total_tasks": 0,
}
)
async def _schedule() -> None:
cls.close_waiter = asyncio.Future()
for interval, timestamp, timezone, immediately, func, handler in context.get(
"_schedule_scheduled_functions", []
):
cls.next_call_at(
time.time(), interval, timestamp, cls.get_timezone(timezone)
) # test provided interval/timestamp on init
for interval, timestamp, timezone, immediately, func, handler in context.get(
"_schedule_scheduled_functions", []
):
await cls.start_schedule_loop(obj, context, handler, func, interval, timestamp, timezone, immediately)
return _schedule
# Pre-built decorator factories bound to the generic schedule handler. The
# double-underscore names keep them out of the module's public namespace;
# the thin public wrapper functions below expose them with explicit signatures.
__schedule = Scheduler.decorator(Scheduler.schedule_handler)
__scheduler = Scheduler.decorator(Scheduler.schedule_handler)
__heartbeat = Scheduler.decorator(Scheduler.schedule_handler_with_interval(1))
__every_second = Scheduler.decorator(Scheduler.schedule_handler_with_interval(1))
__minutely = Scheduler.decorator(Scheduler.schedule_handler_with_interval("minutely"))
__hourly = Scheduler.decorator(Scheduler.schedule_handler_with_interval("hourly"))
__daily = Scheduler.decorator(Scheduler.schedule_handler_with_interval("daily"))
__monthly = Scheduler.decorator(Scheduler.schedule_handler_with_interval("monthly"))
def schedule(
    interval: Optional[Union[str, int]] = None,
    timestamp: Optional[str] = None,
    timezone: Optional[str] = None,
    immediately: Optional[bool] = False,
) -> Callable:
    """Decorator: invoke the decorated service handler on a schedule defined by
    ``interval`` (seconds or named pattern) or ``timestamp``, in ``timezone``."""
    decorator = __schedule(interval=interval, timestamp=timestamp, timezone=timezone, immediately=immediately)
    return cast(Callable, decorator)
def scheduler(
    interval: Optional[Union[str, int]] = None,
    timestamp: Optional[str] = None,
    timezone: Optional[str] = None,
    immediately: Optional[bool] = False,
) -> Callable:
    """Alias of :func:`schedule` kept for API symmetry."""
    decorator = __scheduler(interval=interval, timestamp=timestamp, timezone=timezone, immediately=immediately)
    return cast(Callable, decorator)
def heartbeat(func: Optional[Callable] = None) -> Callable:
    """Decorator: run the handler once every second."""
    decorated = __heartbeat(func)
    return cast(Callable, decorated)
def every_second(func: Optional[Callable] = None) -> Callable:
    """Decorator: run the handler once every second (alias of heartbeat)."""
    decorated = __every_second(func)
    return cast(Callable, decorated)
def minutely(func: Optional[Callable] = None) -> Callable:
    """Decorator: run the handler once per minute."""
    decorated = __minutely(func)
    return cast(Callable, decorated)
def hourly(func: Optional[Callable] = None) -> Callable:
    """Decorator: run the handler once per hour."""
    decorated = __hourly(func)
    return cast(Callable, decorated)
def daily(func: Optional[Callable] = None) -> Callable:
    """Decorator: run the handler once per day."""
    decorated = __daily(func)
    return cast(Callable, decorated)
def monthly(func: Optional[Callable] = None) -> Callable:
    """Decorator: run the handler once per month."""
    decorated = __monthly(func)
    return cast(Callable, decorated)
| mit | 766296b7ced8f7d23be582073fbb7f9c | 44.066667 | 127 | 0.480417 | 4.448065 | false | false | false | false |
kalaspuff/tomodachi | examples/docker_example/http_service/app/service.py | 1 | 1415 | import json
import tomodachi
class Service(tomodachi.Service):
    """Example HTTP service exposing an index page, a health check endpoint
    and JSON error responses for common HTTP error status codes."""
    name = "example"
    options = tomodachi.Options(
        http=tomodachi.Options.HTTP(
            port=31337,
            content_type="application/json; charset=utf-8",
        ),
    )
    # Toggle to False to make the /health endpoint report failure.
    _healthy = True

    @tomodachi.http("GET", r"/")
    async def index_endpoint(self, request):
        # tomodachi.get_execution_context() exposes runtime/service metadata,
        # useful for debugging or for enriching logs and alerts.
        payload = {
            "data": "hello world!",
            "execution_context": tomodachi.get_execution_context(),
        }
        return json.dumps(payload)

    @tomodachi.http("GET", r"/health/?", ignore_logging=True)
    async def health_check(self, request):
        status_code, body = (200, {"status": "healthy"}) if self._healthy else (503, {"status": "not healthy"})
        return status_code, json.dumps(body)

    @tomodachi.http_error(status_code=400)
    async def error_400(self, request):
        return json.dumps({"error": "bad-request"})

    @tomodachi.http_error(status_code=404)
    async def error_404(self, request):
        return json.dumps({"error": "not-found"})

    @tomodachi.http_error(status_code=405)
    async def error_405(self, request):
        return json.dumps({"error": "method-not-allowed"})
| mit | 8efe7322edef5f6a9a47f3fe457475c5 | 29.106383 | 65 | 0.596466 | 3.723684 | false | false | false | false |
kalaspuff/tomodachi | tomodachi/helpers/logging.py | 1 | 3633 | import logging
from logging.handlers import WatchedFileHandler
from typing import Any, Optional, Union
class CustomServiceLogHandler(WatchedFileHandler):
    """Marker subclass used by log_setup() to recognize handlers it has
    installed; inherits file re-opening on rotation from WatchedFileHandler."""
    pass
def log_setup(
    service: Any,
    name: Optional[str] = None,
    level: Optional[Union[str, int]] = None,
    formatter: Optional[Union[logging.Formatter, str, bool]] = True,
    filename: Optional[str] = None,
) -> logging.Logger:
    """Attach a file-backed handler to a service logger and return the logger.

    ``name`` defaults to "log.<service name>" (or "log.service"); ``level`` may
    be a logging level name or number; ``formatter`` may be True (default
    format), a format string, or a Formatter instance. ``filename`` is required.
    """
    if not filename:
        raise Exception("log_filename must be specified for logging setup")
    if not name:
        name = "log.{}".format(service.name) if getattr(service, "name", None) else "log.service"
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # Accept level names ("INFO", ...) as well as numeric levels.
    if level and type(level) is str:
        level = getattr(logging, str(level))
    already_installed = any(
        isinstance(h, CustomServiceLogHandler) and (level is None or level == h.level) for h in logger.handlers
    )
    if not already_installed:
        try:
            handler = CustomServiceLogHandler(filename=filename)
        except FileNotFoundError as e:
            logging.getLogger("logging").warning(
                'Unable to use file for logging - invalid path ("{}")'.format(filename)
            )
            raise e
        except PermissionError as e:
            logging.getLogger("logging").warning(
                'Unable to use file for logging - invalid permissions ("{}")'.format(filename)
            )
            raise e
        if level:
            handler.setLevel(level)
        if formatter is True:
            formatter = logging.Formatter("%(asctime)s (%(name)s): %(message)s")
        elif formatter and type(formatter) is str:
            formatter = logging.Formatter(str(formatter))
        if isinstance(formatter, logging.Formatter):
            handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def log(service: Any, *args: Any, **kwargs: Any) -> None:
    """Emit a log record for a service with a flexible call signature.

    Positional forms:
        log(service, message)
        log(service, level, message)        # level as int or level name string
        log(service, name, message)
        log(service, name, level, message)

    Keyword overrides: ``name``, ``level`` / ``lvl``, ``msg`` / ``message``
    (``message`` takes precedence over ``msg`` if both are given). Any
    remaining keyword arguments are forwarded to ``Logger.log()``.
    """
    log_name = "log.{}".format(service.name) if getattr(service, "name", None) else "log.service"
    log_level: Optional[int] = None
    log_message = None
    if len(args) == 1:
        log_message = args[0]
    elif len(args) == 2:
        if type(args[0]) is int:
            log_level = args[0]
        elif type(args[0]) is str and str(args[0]) in ("NOTSET", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"):
            log_level = getattr(logging, str(args[0]))
        else:
            log_name = args[0]
        log_message = args[1]
    elif len(args) == 3:
        log_name = args[0]
        log_level = int(args[1]) if type(args[1]) is int else getattr(logging, str(args[1]))
        log_message = args[2]
    # Keyword level overrides positional; "lvl" overrides "level" (same order
    # as the original implementation evaluated them).
    for level_keyword in ("level", "lvl"):
        if level_keyword in kwargs:
            log_level_value = kwargs.pop(level_keyword, 0)
            if type(log_level_value) is int:
                log_level = int(log_level_value)
            else:
                log_level = int(getattr(logging, str(log_level_value)))
    if "name" in kwargs:
        log_name = kwargs.pop("name", None)
    # NOTE: the previous version checked "msg" twice (before and after
    # "message"); the second check was dead code since pop() had already
    # removed the key. Net behavior: "message" wins over "msg".
    if "msg" in kwargs:
        log_message = kwargs.pop("msg", None)
    if "message" in kwargs:
        log_message = kwargs.pop("message", None)
    if not log_level:
        log_level = logging.INFO
    logger = logging.getLogger(log_name)
    if log_message:
        logger.log(log_level, str(log_message), **kwargs)
| mit | 28595a2e7fe74b31566dab2304b6b396 | 32.638889 | 116 | 0.59455 | 3.772586 | false | false | false | false |
wireservice/agate | agate/config.py | 3 | 4037 | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
This module contains the global configuration for agate. Users should use
:meth:`get_option` and :meth:`set_option` to modify the global
configuration.
**Available configuration options:**
+-------------------------+------------------------------------------+-----------------------------------------+
| Option | Description | Default value |
+=========================+==========================================+=========================================+
| default_locale | Default locale for number formatting | default_locale('LC_NUMERIC') or 'en_US' |
+-------------------------+------------------------------------------+-----------------------------------------+
| horizontal_line_char | Character to render for horizontal lines | u'-' |
+-------------------------+------------------------------------------+-----------------------------------------+
| vertical_line_char | Character to render for vertical lines | u'|' |
+-------------------------+------------------------------------------+-----------------------------------------+
| bar_char | Character to render for bar chart units | u'░' |
+-------------------------+------------------------------------------+-----------------------------------------+
| printable_bar_char | Printable character for bar chart units | u':' |
+-------------------------+------------------------------------------+-----------------------------------------+
| zero_line_char | Character to render for zero line units | u'▓' |
+-------------------------+------------------------------------------+-----------------------------------------+
| printable_zero_line_char| Printable character for zero line units | u'|' |
+-------------------------+------------------------------------------+-----------------------------------------+
| tick_char | Character to render for axis ticks | u'+' |
+-------------------------+------------------------------------------+-----------------------------------------+
| ellipsis_chars | Characters to render for ellipsis | u'...' |
+-------------------------+------------------------------------------+-----------------------------------------+
"""
from babel.core import default_locale
# Module-private registry of configuration values; read and mutated only
# through get_option() / set_option() / set_options() below.
_options = {
    #: Default locale for number formatting
    'default_locale': default_locale('LC_NUMERIC') or 'en_US',
    #: Character to render for horizontal lines
    'horizontal_line_char': u'-',
    #: Character to render for vertical lines
    'vertical_line_char': u'|',
    #: Character to render for bar chart units
    'bar_char': u'░',
    #: Printable character to render for bar chart units
    'printable_bar_char': u':',
    #: Character to render for zero line units
    'zero_line_char': u'▓',
    #: Printable character to render for zero line units
    'printable_zero_line_char': u'|',
    #: Character to render for axis ticks
    'tick_char': u'+',
    #: Characters to render for ellipsis
    'ellipsis_chars': u'...',
}
def get_option(key):
    """
    Retrieve the current value of a global agate configuration option.

    :param key:
        The name of the configuration option.
    :raises KeyError:
        If no option with that name exists.
    """
    return _options[key]
def set_option(key, value):
    """
    Assign a new value to a single global agate configuration option.

    :param key:
        The name of the configuration option.
    :param value:
        The new value to set for the configuration option.
    """
    _options[key] = value
def set_options(options):
    """
    Set a dictionary of options simultaneously.

    :param options:
        A dictionary of option names and values.
    """
    _options.update(options)
| mit | 8153b0705500b0cbee6d24dd73043dbb | 44.784091 | 112 | 0.384711 | 5.603616 | false | true | false | false |
kalaspuff/tomodachi | examples/basic_examples/amqp_middleware_service.py | 1 | 2061 | import os
from typing import Any, Callable, Dict
import tomodachi
from tomodachi import Options, amqp, amqp_publish
from tomodachi.discovery import DummyRegistry
from tomodachi.envelope import JsonBase
async def middleware_function(
    func: Callable, service: Any, message: Any, routing_key: str, context: Dict, *args: Any, **kwargs: Any
) -> Any:
    """Example AMQP message middleware: runs around every matching handler call
    and may inspect or modify the arguments before invoking ``func``."""
    # Functionality before function is called
    service.log("middleware before")
    return_value = await func(*args, **kwargs)
    # There's also the possibility to pass in extra arguments or keywords arguments, for example:
    # return_value = await func(*args, id='overridden', **kwargs)
    # Functionality after function is called
    service.log("middleware after")
    return return_value
class ExampleAmqpService(tomodachi.Service):
    """Example service that consumes messages from an AMQP routing key and
    publishes a greeting message on startup."""
    name = "example-amqp-service"
    log_level = "INFO"
    uuid = str(os.environ.get("SERVICE_UUID") or "")
    # Build own "discovery" functions, to be run on start and stop
    # See tomodachi/discovery/dummy_registry.py for example
    discovery = [DummyRegistry]
    # The message envelope class defines how a message should be processed when sent and received
    # See tomodachi/envelope/json_base.py for a basic example using JSON and transferring some metadata
    message_envelope = JsonBase
    # Adds a middleware function that is run on every incoming message.
    # Several middlewares can be chained.
    message_middleware = [middleware_function]
    # Some options can be specified to define credentials, used ports, hostnames, access log, etc.
    options = Options(amqp=Options.AMQP(queue_ttl=60))

    @amqp("example.route1")
    async def route1a(self, data: Any) -> None:
        self.log('Received data (function: route1a) - "{}"'.format(data))

    async def _started_service(self) -> None:
        # Publish a greeting onto the example routing key as soon as the
        # service has started.
        data, routing_key = "友達", "example.route1"
        self.log('Publish data "{}"'.format(data))
        await amqp_publish(self, data, routing_key=routing_key)
| mit | ab8fadc630655e52c8ab2b61bfe205f7 | 35.732143 | 106 | 0.704424 | 3.837687 | false | false | false | false |
kalaspuff/tomodachi | tomodachi/transport/http.py | 1 | 64366 | import asyncio
import functools
import inspect
import ipaddress
import logging
import os
import pathlib
import platform
import re
import time
import uuid
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, SupportsInt, Tuple, Union, cast
import yarl
from aiohttp import WSMsgType
from aiohttp import __version__ as aiohttp_version
from aiohttp import hdrs, web, web_protocol, web_server, web_urldispatcher
from aiohttp.helpers import BasicAuth
from aiohttp.http import HttpVersion
from aiohttp.streams import EofStream
from aiohttp.web_fileresponse import FileResponse
from multidict import CIMultiDict, CIMultiDictProxy
from tomodachi.helpers.dict import merge_dicts
from tomodachi.helpers.execution_context import (
decrease_execution_context_value,
increase_execution_context_value,
set_execution_context,
)
from tomodachi.helpers.middleware import execute_middlewares
from tomodachi.invoker import Invoker
from tomodachi.options import Options
# Module-level logger used for HTTP transport access and error logging.
http_logger = logging.getLogger("transport.http")
# Should be implemented as lazy load instead
class ColoramaCache:
    # Cached outcome of the optional colorama import: None = not yet attempted,
    # True/False = import succeeded/failed. _colorama holds the module object.
    _is_colorama_installed: Optional[bool] = None
    _colorama: Any = None
class HttpException(Exception):
    """Exception raised from HTTP handlers; carries a log level (``_log_level``)
    used when the error is logged by the transport."""
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Fall back to "INFO" whenever no truthy "log_level" keyword is given.
        log_level = kwargs.get("log_level") if kwargs else None
        self._log_level = log_level if log_level else "INFO"
class RequestHandler(web_protocol.RequestHandler):
    """aiohttp RequestHandler subclass adding tomodachi's own access logging,
    real-ip resolution and a configurable Server response header."""
    __slots__ = (
        *web_protocol.RequestHandler.__slots__,
        "_server_header",
        "_access_log",
        "_connection_start_time",
        "_keepalive",
    )
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self._server_header = kwargs.pop("server_header", None) if kwargs else None
        self._access_log = kwargs.pop("access_log", None) if kwargs else None
        self._connection_start_time = time.time()
        # aiohttp's built-in access logger is disabled; logging happens here.
        super().__init__(*args, access_log=None, **kwargs)  # type: ignore
    @staticmethod
    def get_request_ip(request: Any, context: Optional[Dict] = None) -> Optional[str]:
        """Resolve the client IP for a request, honouring the configured
        real-ip header only when the direct peer is inside a trusted CIDR
        range. The result is memoized on the request's cache."""
        if request._cache.get("request_ip"):
            return str(request._cache.get("request_ip", ""))
        if request.transport:
            if not context:
                context = {}
            http_options: Options.HTTP = HttpTransport.options(context).http
            real_ip_header = http_options.real_ip_header
            real_ip_from = http_options.real_ip_from
            if isinstance(real_ip_from, str):
                real_ip_from = [real_ip_from]
            peername = request.transport.get_extra_info("peername")
            request_ip = None
            if peername:
                request_ip, _ = peername
            if (
                real_ip_header
                and real_ip_from
                and request.headers.get(real_ip_header)
                and request_ip
                and len(real_ip_from)
            ):
                # Only trust the forwarded header when the peer is in a trusted network;
                # take the first (left-most) address from the header value.
                if any([ipaddress.ip_address(request_ip) in ipaddress.ip_network(cidr) for cidr in real_ip_from]):
                    request_ip = request.headers.get(real_ip_header).split(",")[0].strip().split(" ")[0].strip()
            request._cache["request_ip"] = request_ip
            return request_ip
        return None
    @staticmethod
    def colorize_status(text: Optional[Union[str, int]], status: Optional[Union[str, int, bool]] = False) -> str:
        """Wrap ``text`` in ANSI colors keyed on the HTTP status family, when
        colorama can be imported and no handlers are attached to the HTTP
        logger. Falls back to the plain text otherwise."""
        if ColoramaCache._is_colorama_installed is None:
            try:
                import colorama  # noqa # isort:skip
                ColoramaCache._is_colorama_installed = True
                ColoramaCache._colorama = colorama
            except Exception:
                ColoramaCache._is_colorama_installed = False
        if ColoramaCache._is_colorama_installed is False:
            return str(text) if text else ""
        if status is False:
            status = text
        status_code = str(status) if status else None
        if status_code and not http_logger.handlers:
            output_text = str(text) if text else ""
            color = None
            if status_code == "101":
                color = ColoramaCache._colorama.Fore.CYAN
            elif status_code[0] == "2":
                color = ColoramaCache._colorama.Fore.GREEN
            elif status_code[0] == "3" or status_code == "499":
                color = ColoramaCache._colorama.Fore.YELLOW
            elif status_code[0] == "4":
                color = ColoramaCache._colorama.Fore.RED
            elif status_code[0] == "5":
                color = ColoramaCache._colorama.Fore.WHITE + ColoramaCache._colorama.Back.RED
            if color:
                return "{}{}{}".format(color, output_text, ColoramaCache._colorama.Style.RESET_ALL)
            return output_text
        return str(text) if text else ""
    def handle_error(
        self, request: Any, status: int = 500, exc: Any = None, message: Optional[str] = None
    ) -> web.Response:
        """Handle errors.

        Returns HTTP response with specific status code. Logs additional
        information. It always closes current connection."""
        if self.transport is None:
            # client has been disconnected during writing.
            if self._access_log:
                request_ip = RequestHandler.get_request_ip(request, None)
                version_string = None
                if isinstance(request.version, HttpVersion):
                    version_string = "HTTP/{}.{}".format(request.version.major, request.version.minor)
                # Logged as 499: client closed the connection before the response was written.
                http_logger.info(
                    '[{}] [{}] {} {} "{} {}{}{}" - {} "{}" -'.format(
                        RequestHandler.colorize_status("http", 499),
                        RequestHandler.colorize_status(499),
                        request_ip or "",
                        '"{}"'.format(request._cache["auth"].login.replace('"', ""))
                        if request._cache.get("auth") and getattr(request._cache.get("auth"), "login", None)
                        else "-",
                        request.method,
                        request.path,
                        "?{}".format(request.query_string) if request.query_string else "",
                        " {}".format(version_string) if version_string else "",
                        request.content_length if request.content_length is not None else "-",
                        request.headers.get("User-Agent", "").replace('"', ""),
                    )
                )
        headers: CIMultiDict = CIMultiDict({})
        headers[hdrs.CONTENT_TYPE] = "text/plain; charset=utf-8"
        # 500 responses never leak the internal message body.
        msg = "" if status == 500 or not message else message
        headers[hdrs.CONTENT_LENGTH] = str(len(msg))
        headers[hdrs.SERVER] = self._server_header or ""
        if isinstance(request.version, HttpVersion) and (request.version.major, request.version.minor) in (
            (1, 0),
            (1, 1),
        ):
            headers[hdrs.CONNECTION] = "close"
        resp: web.Response = web.Response(status=status, text=msg, headers=headers)
        resp.force_close()
        # some data already got sent, connection is broken
        if request.writer.output_size > 0 or self.transport is None:
            self.force_close()
        elif self.transport is not None:
            request_ip = RequestHandler.get_request_ip(request, None)
            if not request_ip:
                peername = request.transport.get_extra_info("peername")
                if peername:
                    request_ip, _ = peername
            if self._access_log:
                http_logger.info(
                    '[{}] [{}] {} {} "INVALID" {} - "" -'.format(
                        RequestHandler.colorize_status("http", status),
                        RequestHandler.colorize_status(status),
                        request_ip or "",
                        '"{}"'.format(request._cache["auth"].login.replace('"', ""))
                        if request._cache.get("auth") and getattr(request._cache.get("auth"), "login", None)
                        else "-",
                        len(msg),
                    )
                )
        return resp
class Server(web_server.Server):
    """aiohttp Server subclass whose protocol factory produces tomodachi's
    RequestHandler, forwarding the server header and access-log settings."""
    __slots__ = (
        "_loop",
        "_connections",
        "_kwargs",
        "requests_count",
        "request_handler",
        "request_factory",
        "_server_header",
        "_access_log",
    )
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self._server_header = kwargs.pop("server_header", None) if kwargs else None
        self._access_log = kwargs.pop("access_log", None) if kwargs else None
        super().__init__(*args, **kwargs)
    def __call__(self) -> RequestHandler:
        # Invoked by asyncio as the protocol factory for each new connection.
        return RequestHandler(
            self, loop=self._loop, server_header=self._server_header, access_log=self._access_log, **self._kwargs
        )
class DynamicResource(web_urldispatcher.DynamicResource):
    # NOTE: deliberately does NOT call super().__init__() - the aiohttp parent
    # would compile its own pattern/formatter, whereas here a pre-compiled
    # regex pattern is injected directly and no URL formatter is used.
    def __init__(self, pattern: Any, *, name: Optional[str] = None) -> None:
        self._routes: List = []
        self._name = name
        self._pattern = pattern
        self._formatter = ""
class Response(object):
    """Framework-agnostic HTTP response container returned from handlers;
    converted to an aiohttp ``web.Response`` via :meth:`get_aiohttp_response`."""
    __slots__ = ("_body", "_status", "_reason", "_headers", "content_type", "charset", "missing_content_type")
    def __init__(
        self,
        *,
        body: Optional[Union[bytes, str]] = None,
        status: int = 200,
        reason: Optional[str] = None,
        headers: Optional[Union[Dict, CIMultiDict, CIMultiDictProxy]] = None,
        content_type: Optional[str] = None,
        charset: Optional[str] = None,
    ) -> None:
        if headers is None:
            headers = CIMultiDict()
        elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):
            headers = CIMultiDict(headers)
        self._body = body
        self._status = status
        self._reason = reason
        self._headers = headers
        # An explicit Content-Type header takes precedence over the keyword arguments.
        self.content_type = content_type if hdrs.CONTENT_TYPE not in headers else None
        self.charset = charset if hdrs.CONTENT_TYPE not in headers else None
        self.missing_content_type = hdrs.CONTENT_TYPE not in headers and not content_type and not charset
    def get_aiohttp_response(
        self, context: Dict, default_charset: Optional[str] = None, default_content_type: Optional[str] = None
    ) -> web.Response:
        """Build the aiohttp response: apply context defaults when no content
        type was specified and encode a str body with the effective charset.
        Raises ``web.HTTPInternalServerError`` if the body cannot be encoded."""
        if self.missing_content_type:
            self.charset = default_charset
            self.content_type = default_content_type
        charset = self.charset
        # Prefer a charset parameter present in an explicit Content-Type header.
        if hdrs.CONTENT_TYPE in self._headers and ";" in self._headers[hdrs.CONTENT_TYPE]:
            try:
                charset = (
                    str([v for v in self._headers[hdrs.CONTENT_TYPE].split(";") if "charset=" in v][0])
                    .replace("charset=", "")
                    .strip()
                )
            except IndexError:
                pass
        elif hdrs.CONTENT_TYPE in self._headers and ";" not in self._headers[hdrs.CONTENT_TYPE]:
            charset = None
        if self._body and not isinstance(self._body, bytes) and charset:
            body = self._body
            try:
                body_value = body.encode(charset.lower())
            except (ValueError, LookupError, UnicodeEncodeError) as e:
                logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
                raise web.HTTPInternalServerError() from e
        elif self._body:
            body_value = self._body.encode() if not isinstance(self._body, bytes) else self._body
        else:
            body_value = b""
        response: web.Response = web.Response(
            body=body_value,
            status=self._status,
            reason=self._reason,
            headers=self._headers,
            content_type=self.content_type,
            charset=self.charset,
        )
        return response
class HttpTransport(Invoker):
server_port_mapping: Dict[Any, str] = {}
    @classmethod
    async def request_handler(
        cls,
        obj: Any,
        context: Dict,
        func: Any,
        method: Union[str, List[str], Tuple[str, ...]],
        url: str,
        *,
        ignore_logging: Union[bool, List[int], Tuple[int, ...]] = False,
        pre_handler_func: Optional[Callable] = None,
    ) -> Any:
        """Register ``func`` as the handler for HTTP ``method`` + ``url`` (a
        regex pattern), wrapping it with middleware execution and response
        resolution, then (lazily) start the HTTP server."""
        # Normalize the route into a fully anchored regex: ^...$
        pattern = r"^{}$".format(re.sub(r"\$$", "", re.sub(r"^\^?(.*)$", r"\1", url)))
        compiled_pattern = re.compile(pattern)
        http_options: Options.HTTP = cls.options(context).http
        default_content_type = http_options.content_type
        default_charset = http_options.charset
        if default_content_type is not None and ";" in default_content_type:
            # for backwards compability
            try:
                default_charset = (
                    str([v for v in default_content_type.split(";") if "charset=" in v][0])
                    .replace("charset=", "")
                    .strip()
                )
                default_content_type = str([v for v in default_content_type.split(";")][0]).strip()
            except IndexError:
                pass
        values = inspect.getfullargspec(func)
        # Map the handler's defaulted positional parameters to their default values.
        original_kwargs = (
            {k: values.defaults[i] for i, k in enumerate(values.args[len(values.args) - len(values.defaults) :])}
            if values.defaults
            else {}
        )
        middlewares = context.get("http_middleware", [])
        async def handler(request: web.Request) -> Union[web.Response, web.FileResponse]:
            kwargs = dict(original_kwargs)
            if "(" in pattern:
                # Forward named capture groups from the URL path as keyword arguments.
                result = compiled_pattern.match(request.path)
                if result:
                    for k, v in result.groupdict().items():
                        kwargs[k] = v
            @functools.wraps(func)
            async def routine_func(
                *a: Any, **kw: Any
            ) -> Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response]:
                routine = func(*(obj, request, *a), **merge_dicts(kwargs, kw))
                return_value: Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response] = (
                    (await routine) if inspect.isawaitable(routine) else routine
                )
                return return_value
            if not context.get("_http_accept_new_requests"):
                # Server is shutting down or not yet accepting requests.
                raise web.HTTPServiceUnavailable()
            if pre_handler_func:
                await pre_handler_func(obj, request)
            return_value: Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response]
            if middlewares:
                return_value = await execute_middlewares(func, routine_func, middlewares, *(obj, request))
            else:
                routine = func(obj, request, **kwargs)
                return_value = (await routine) if inspect.isawaitable(routine) else routine
            # Convert whatever the handler returned (str/bytes/dict/tuple/...)
            # into an actual aiohttp response.
            response = resolve_response_sync(
                return_value,
                request=request,
                context=context,
                default_content_type=default_content_type,
                default_charset=default_charset,
            )
            return response
        context["_http_routes"] = context.get("_http_routes", [])
        route_context = {"ignore_logging": ignore_logging}
        if isinstance(method, list) or isinstance(method, tuple):
            for m in method:
                context["_http_routes"].append((m.upper(), pattern, handler, route_context))
        elif isinstance(method, str):
            context["_http_routes"].append((method.upper(), pattern, handler, route_context))
        else:
            raise Exception("Invalid method '{}' for route".format(str(method)))
        start_func = cls.start_server(obj, context)
        return (await start_func) if start_func else None
@classmethod
async def static_request_handler(
    cls,
    obj: Any,
    context: Dict,
    func: Any,
    path: str,
    base_url: str,
    *,
    ignore_logging: Union[bool, List[int], Tuple[int, ...]] = False,
) -> Any:
    """Register a GET route that serves files from a local directory.

    ``base_url`` is converted into a regex route with a ``filename`` capture
    group (unless the caller already supplied one), and ``path`` is resolved
    relative to the service file's directory when it is not absolute. The
    registered handler streams matching files with aiohttp's FileResponse.

    :param obj: the service instance the route is registered for.
    :param context: per-service context dict; routes are appended to
        ``context["_http_routes"]``.
    :param func: the decorated user function (unused at request time).
    :param path: local directory to serve files from.
    :param base_url: URL prefix (or full pattern with ``?P<filename>``).
    :param ignore_logging: True, or a list/tuple of status codes, to suppress
        access-log entries for this route.
    """
    if "?P<filename>" not in base_url:
        pattern = r"^{}(?P<filename>.+?)$".format(re.sub(r"\$$", "", re.sub(r"^\^?(.*)$", r"\1", base_url)))
    else:
        pattern = r"^{}$".format(re.sub(r"\$$", "", re.sub(r"^\^?(.*)$", r"\1", base_url)))
    compiled_pattern = re.compile(pattern)
    if path in ("", "/"):
        # Hopefully no one wants to do this intentionally, and if anyone accidentally does we'll catch it here
        raise Exception("Invalid path '{}' for static route".format(path))
    if not path.startswith("/"):
        path = "{}/{}".format(os.path.dirname(context.get("context", {}).get("_service_file_path", "")), path)
    if not path.endswith("/"):
        path = "{}/".format(path)
    if os.path.realpath(path) == "/":
        raise Exception("Invalid path '{}' for static route resolves to '/'".format(path))

    async def handler(request: web.Request) -> Union[web.Response, web.FileResponse]:
        # NOTE(review): yarl.URL._normalize_path is a private API - presumably
        # used to collapse "../" sequences before matching; confirm on yarl upgrades.
        normalized_request_path = yarl.URL._normalize_path(request.path)
        if not normalized_request_path.startswith("/"):
            raise web.HTTPNotFound()
        result = compiled_pattern.match(normalized_request_path)
        filename = result.groupdict()["filename"] if result else ""
        basepath = os.path.realpath(path)
        realpath = os.path.realpath("{}/{}".format(basepath, filename))
        try:
            if any(
                [
                    not realpath,
                    not basepath,
                    realpath == basepath,
                    # path traversal guard: resolved file must stay inside basepath
                    os.path.commonprefix((realpath, basepath)) != basepath,
                    not os.path.exists(realpath),
                    not os.path.isdir(basepath),
                    basepath == "/",
                    os.path.isdir(realpath),
                ]
            ):
                raise web.HTTPNotFound()
            # Probe readability up front so a PermissionError becomes a 403.
            # The original code leaked this file handle on every request by
            # never closing it; the context manager closes it immediately.
            # deepcode ignore PT: Input data to pathlib.Path is sanitized
            with pathlib.Path(realpath).open("r"):
                pass
            response: Union[web.Response, web.FileResponse] = FileResponse(path=realpath, chunk_size=256 * 1024)
            return response
        except PermissionError:
            raise web.HTTPForbidden()

    route_context = {"ignore_logging": ignore_logging}
    context["_http_routes"] = context.get("_http_routes", [])
    context["_http_routes"].append(("GET", pattern, handler, route_context))
    start_func = cls.start_server(obj, context)
    return (await start_func) if start_func else None
@classmethod
async def error_handler(cls, obj: Any, context: Dict, func: Any, status_code: int) -> Any:
    """Register ``func`` as the handler for error responses with ``status_code``.

    Wraps the user function in an aiohttp-compatible handler, optionally runs
    the configured HTTP middlewares around it, and stores the handler in
    ``context["_http_error_handler"]`` keyed by the integer status code.
    """
    http_options: Options.HTTP = cls.options(context).http
    default_content_type = http_options.content_type
    default_charset = http_options.charset
    if default_content_type is not None and ";" in default_content_type:
        # for backwards compatibility: a combined "type; charset=..." value is
        # split into a separate content type and charset.
        try:
            default_charset = (
                str([v for v in default_content_type.split(";") if "charset=" in v][0])
                .replace("charset=", "")
                .strip()
            )
            default_content_type = str([v for v in default_content_type.split(";")][0]).strip()
        except IndexError:
            pass
    values = inspect.getfullargspec(func)
    # Map the user function's trailing defaulted arguments to their default values.
    kwargs = (
        {k: values.defaults[i] for i, k in enumerate(values.args[len(values.args) - len(values.defaults) :])}
        if values.defaults
        else {}
    )
    middlewares = context.get("http_middleware", [])

    async def handler(request: web.Request) -> Union[web.Response, web.FileResponse]:
        # Remember which error status triggered this handler; read later by
        # resolve_response_sync when no explicit status is supplied.
        request._cache["error_status_code"] = status_code

        @functools.wraps(func)
        async def routine_func(
            *a: Any, **kw: Any
        ) -> Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response]:
            # Supports both sync and async user functions.
            routine = func(*(obj, request, *a), **merge_dicts(kwargs, kw))
            return_value: Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response] = (
                (await routine) if inspect.isawaitable(routine) else routine
            )
            return return_value

        return_value: Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response]
        if middlewares:
            return_value = await execute_middlewares(func, routine_func, middlewares, *(obj, request))
        else:
            routine = func(obj, request, **kwargs)
            return_value = (await routine) if inspect.isawaitable(routine) else routine
        response = resolve_response_sync(
            return_value,
            request=request,
            context=context,
            status_code=status_code,
            default_content_type=default_content_type,
            default_charset=default_charset,
        )
        return response

    context["_http_error_handler"] = context.get("_http_error_handler", {})
    context["_http_error_handler"][int(status_code)] = handler
    start_func = cls.start_server(obj, context)
    return (await start_func) if start_func else None
@classmethod
async def websocket_handler(cls, obj: Any, context: Dict, func: Any, url: str) -> Any:
    """Register ``func`` as a websocket handler for ``url``.

    The user function is invoked once per websocket connection and may return
    a single receive callback, a 1-tuple ``(receive,)`` or a 2-tuple
    ``(receive, close)``. The connection itself is registered as a GET route
    via ``cls.request_handler`` with a pre-handler that marks the request as a
    websocket upgrade.
    """
    pattern = r"^{}$".format(re.sub(r"\$$", "", re.sub(r"^\^?(.*)$", r"\1", url)))
    compiled_pattern = re.compile(pattern)
    access_log = cls.options(context).http.access_log

    async def _pre_handler_func(_: Any, request: web.Request) -> None:
        # Flag the request before the shared request handler runs so that
        # access logging and keep-alive logic treat it as a websocket.
        request._cache["is_websocket"] = True
        request._cache["websocket_uuid"] = str(uuid.uuid4())

    values = inspect.getfullargspec(func)
    # Defaults for the user function's trailing keyword arguments.
    original_kwargs = (
        {k: values.defaults[i] for i, k in enumerate(values.args[len(values.args) - len(values.defaults) :])}
        if values.defaults
        else {}
    )

    @functools.wraps(func)
    async def _func(obj: Any, request: web.Request, *a: Any, **kw: Any) -> None:
        websocket = web.WebSocketResponse()
        request_ip = RequestHandler.get_request_ip(request, context)
        try:
            await websocket.prepare(request)
        except Exception:
            # Handshake failed - close the half-open socket and log the
            # cancelled upgrade.
            try:
                await websocket.close()
            except Exception:
                pass
            if access_log:
                http_logger.info(
                    '[{}] {} {} "CANCELLED {}{}" {} "{}" {}'.format(
                        RequestHandler.colorize_status("websocket", 101),
                        request_ip,
                        '"{}"'.format(request._cache["auth"].login.replace('"', ""))
                        if request._cache.get("auth") and getattr(request._cache.get("auth"), "login", None)
                        else "-",
                        request.path,
                        "?{}".format(request.query_string) if request.query_string else "",
                        request._cache.get("websocket_uuid", ""),
                        request.headers.get("User-Agent", "").replace('"', ""),
                        "-",
                    )
                )
            return
        # Track the open socket so it can be drained on service shutdown.
        context["_http_open_websockets"] = context.get("_http_open_websockets", [])
        context["_http_open_websockets"].append(websocket)
        if access_log:
            http_logger.info(
                '[{}] {} {} "OPEN {}{}" {} "{}" {}'.format(
                    RequestHandler.colorize_status("websocket", 101),
                    request_ip,
                    '"{}"'.format(request._cache["auth"].login.replace('"', ""))
                    if request._cache.get("auth") and getattr(request._cache.get("auth"), "login", None)
                    else "-",
                    request.path,
                    "?{}".format(request.query_string) if request.query_string else "",
                    request._cache.get("websocket_uuid", ""),
                    request.headers.get("User-Agent", "").replace('"', ""),
                    "-",
                )
            )
        kwargs = dict(original_kwargs)
        if "(" in pattern:
            # Fill keyword arguments from named capture groups in the route.
            result = compiled_pattern.match(request.path)
            if result:
                for k, v in result.groupdict().items():
                    kwargs[k] = v
        if len(values.args) - (len(values.defaults) if values.defaults else 0) >= 3:
            # If the function takes a third required argument the value will be filled with the request object
            a = a + (request,)
        if "request" in values.args and (
            len(values.args) - (len(values.defaults) if values.defaults else 0) < 3 or values.args[2] != "request"
        ):
            kwargs["request"] = request
        try:
            routine = func(*(obj, websocket, *a), **merge_dicts(kwargs, kw))
            callback_functions: Optional[Union[Tuple[Callable, Callable], Tuple[Callable], Callable]] = (
                (await routine) if inspect.isawaitable(routine) else routine
            )
        except Exception as e:
            # User connection-setup code raised - close, deregister and log a 500.
            logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
            try:
                await websocket.close()
            except Exception:
                pass
            try:
                context["_http_open_websockets"].remove(websocket)
            except Exception:
                pass
            if access_log:
                http_logger.info(
                    '[{}] {} {} "{} {}{}" {} "{}" {}'.format(
                        RequestHandler.colorize_status("websocket", 500),
                        request_ip,
                        '"{}"'.format(request._cache["auth"].login.replace('"', ""))
                        if request._cache.get("auth") and getattr(request._cache.get("auth"), "login", None)
                        else "-",
                        RequestHandler.colorize_status("ERROR", 500),
                        request.path,
                        "?{}".format(request.query_string) if request.query_string else "",
                        request._cache.get("websocket_uuid", ""),
                        request.headers.get("User-Agent", "").replace('"', ""),
                        "-",
                    )
                )
            return
        _receive_func = None
        _close_func = None
        # Unpack the optional (receive, close) callbacks from the return value.
        if callback_functions and isinstance(callback_functions, tuple):
            if len(callback_functions) == 2:
                _receive_func, _close_func = cast(Tuple[Callable, Callable], callback_functions)
            elif len(callback_functions) == 1:
                (_receive_func,) = cast(Tuple[Callable], callback_functions)
        elif callback_functions:
            _receive_func = callback_functions
        try:
            # Main receive loop: dispatch text frames to the receive callback.
            async for message in websocket:
                if message.type == WSMsgType.TEXT:
                    if _receive_func:
                        try:
                            await _receive_func(message.data)
                        except Exception as e:
                            logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
                elif message.type == WSMsgType.ERROR:
                    if not context.get("log_level") or context.get("log_level") in ["DEBUG"]:
                        ws_exception = websocket.exception()
                        if isinstance(ws_exception, (EofStream, RuntimeError)):
                            pass
                        elif isinstance(ws_exception, Exception):
                            logging.getLogger("exception").exception(
                                "Uncaught exception: {}".format(str(ws_exception))
                            )
                        else:
                            http_logger.warning('Websocket exception: "{}"'.format(ws_exception))
                elif message.type == WSMsgType.CLOSED:
                    break  # noqa
        except Exception:
            pass
        finally:
            # Always run the close callback (if any), close the socket and
            # drop it from the open-websockets registry.
            if _close_func:
                try:
                    await _close_func()
                except Exception as e:
                    logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
            try:
                await websocket.close()
            except Exception:
                pass
            try:
                context["_http_open_websockets"].remove(websocket)
            except Exception:
                pass

    return await cls.request_handler(obj, context, _func, "GET", url, pre_handler_func=_pre_handler_func)
@staticmethod
async def start_server(obj: Any, context: Dict) -> Optional[Callable]:
    """Build the aiohttp application for all registered routes.

    Returns an inner coroutine function that actually binds and starts
    listening, or ``None`` if a server was already started for this context.
    Also installs a graceful ``_stop_service`` shutdown hook on ``obj``.
    """
    if context.get("_http_server_started"):
        return None
    context["_http_server_started"] = True
    http_options: Options.HTTP = HttpTransport.options(context).http
    server_header = http_options.server_header
    access_log = http_options.access_log
    logger_handler = None
    if isinstance(access_log, str):
        # access_log given as a file path: attach a file handler for access entries.
        from logging.handlers import WatchedFileHandler  # noqa # isort:skip

        try:
            wfh = WatchedFileHandler(filename=access_log)
        except FileNotFoundError as e:
            http_logger.warning('Unable to use file for access log - invalid path ("{}")'.format(access_log))
            raise HttpException(str(e)) from e
        except PermissionError as e:
            http_logger.warning('Unable to use file for access log - invalid permissions ("{}")'.format(access_log))
            raise HttpException(str(e)) from e
        wfh.setLevel(logging.DEBUG)
        http_logger.setLevel(logging.DEBUG)
        http_logger.info('Logging to "{}"'.format(access_log))
        logger_handler = wfh
        http_logger.addHandler(logger_handler)

    async def _start_server() -> None:
        loop = asyncio.get_event_loop()
        logging.getLogger("aiohttp.access").setLevel(logging.WARNING)

        async def request_handler_func(
            request: web.Request, handler: Callable
        ) -> Union[web.Response, web.FileResponse]:
            # Core per-request wrapper: error-handler dispatch, access logging
            # and keep-alive header handling around the route handler.
            response: Union[web.Response, web.FileResponse]
            request_ip = RequestHandler.get_request_ip(request, context)
            if not request_ip:
                # Transport broken before request handling started, ignore request
                response = web.Response(status=499, headers={hdrs.SERVER: server_header or ""})
                response._eof_sent = True
                response.force_close()
                return response
            if request.headers.get("Authorization"):
                try:
                    request._cache["auth"] = BasicAuth.decode(request.headers.get("Authorization", ""))
                except ValueError:
                    pass
            timer = time.time() if access_log else 0
            response = web.Response(status=503, headers={})
            try:
                response = await handler(request)
            except web.HTTPException as e:
                # Prefer a user-registered error handler for this status code.
                error_handler = context.get("_http_error_handler", {}).get(e.status, None)
                if error_handler:
                    response = await error_handler(request)
                else:
                    response = e
                    response.body = str(e).encode("utf-8")
            except Exception as e:
                error_handler = context.get("_http_error_handler", {}).get(500, None)
                logging.getLogger("exception").exception("Uncaught exception: {}".format(str(e)))
                if error_handler:
                    response = await error_handler(request)
                else:
                    response = web.HTTPInternalServerError()
                    response.body = b""
            finally:
                if not request.transport:
                    # Client went away mid-request: synthesize a 499.
                    response = web.Response(status=499, headers={})
                    response._eof_sent = True
                request_version = (
                    (request.version.major, request.version.minor)
                    if isinstance(request.version, HttpVersion)
                    else (1, 0)
                )
                if access_log:
                    request_time = time.time() - timer
                    version_string = None
                    if isinstance(request.version, HttpVersion):
                        version_string = "HTTP/{}.{}".format(request_version[0], request_version[1])
                    if not request._cache.get("is_websocket"):
                        status_code = response.status if response is not None else 500
                        ignore_logging = getattr(handler, "ignore_logging", False)
                        if ignore_logging is True:
                            pass
                        elif isinstance(ignore_logging, (list, tuple)) and status_code in ignore_logging:
                            pass
                        else:
                            http_logger.info(
                                '[{}] [{}] {} {} "{} {}{}{}" {} {} "{}" {}'.format(
                                    RequestHandler.colorize_status("http", status_code),
                                    RequestHandler.colorize_status(status_code),
                                    request_ip,
                                    '"{}"'.format(request._cache["auth"].login.replace('"', ""))
                                    if request._cache.get("auth")
                                    and getattr(request._cache.get("auth"), "login", None)
                                    else "-",
                                    request.method,
                                    request.path,
                                    "?{}".format(request.query_string) if request.query_string else "",
                                    " {}".format(version_string) if version_string else "",
                                    response.content_length
                                    if response is not None and response.content_length is not None
                                    else "-",
                                    request.content_length if request.content_length is not None else "-",
                                    request.headers.get("User-Agent", "").replace('"', ""),
                                    "{0:.5f}s".format(round(request_time, 5)),
                                )
                            )
                    else:
                        http_logger.info(
                            '[{}] {} {} "CLOSE {}{}" {} "{}" {}'.format(
                                RequestHandler.colorize_status("websocket", 101),
                                request_ip,
                                '"{}"'.format(request._cache["auth"].login.replace('"', ""))
                                if request._cache.get("auth") and getattr(request._cache.get("auth"), "login", None)
                                else "-",
                                request.path,
                                "?{}".format(request.query_string) if request.query_string else "",
                                request._cache.get("websocket_uuid", ""),
                                request.headers.get("User-Agent", "").replace('"', ""),
                                "{0:.5f}s".format(round(request_time, 5)),
                            )
                        )
                if response is not None:
                    response.headers[hdrs.SERVER] = server_header or ""
                    if request_version in ((1, 0), (1, 1)) and not request._cache.get("is_websocket"):
                        use_keepalive = False
                        if context["_http_tcp_keepalive"] and request.keep_alive and request.protocol:
                            use_keepalive = True
                            if any(
                                [
                                    # keep-alive timeout not set or is non-positive
                                    (
                                        not context["_http_keepalive_timeout"]
                                        or context["_http_keepalive_timeout"] <= 0
                                    ),
                                    # keep-alive request count has passed configured max for this connection
                                    (
                                        context["_http_max_keepalive_requests"]
                                        and request.protocol._request_count
                                        >= context["_http_max_keepalive_requests"]
                                    ),
                                    # keep-alive time has passed configured max for this connection
                                    (
                                        context["_http_max_keepalive_time"]
                                        and time.time()
                                        > getattr(request.protocol, "_connection_start_time", 0)
                                        + context["_http_max_keepalive_time"]
                                    ),
                                ]
                            ):
                                use_keepalive = False
                        if use_keepalive:
                            response.headers[hdrs.CONNECTION] = "keep-alive"
                            response.headers[hdrs.KEEP_ALIVE] = "timeout={}{}".format(
                                request.protocol._keepalive_timeout,
                                ", max={}".format(context["_http_max_keepalive_requests"])
                                if context["_http_max_keepalive_requests"]
                                else "",
                            )
                        else:
                            response.headers[hdrs.CONNECTION] = "close"
                            response.force_close()
                    if not context["_http_tcp_keepalive"] and not request._cache.get("is_websocket"):
                        response.force_close()
            if isinstance(response, (web.HTTPException, web.HTTPInternalServerError)):
                raise response
            return response

        @web.middleware
        async def middleware(request: web.Request, handler: Callable) -> Union[web.Response, web.FileResponse]:
            # Shields the request task from cancellation and keeps bookkeeping
            # of active request tasks for graceful shutdown.
            increase_execution_context_value("http_current_tasks")
            increase_execution_context_value("http_total_tasks")
            task = asyncio.ensure_future(request_handler_func(request, handler))
            context["_http_active_requests"] = context.get("_http_active_requests", set())
            context["_http_active_requests"].add(task)
            try:
                await asyncio.shield(task)
            except asyncio.CancelledError:
                # The awaiting side was cancelled - still let the shielded
                # task run to completion and return its result.
                try:
                    await task
                    decrease_execution_context_value("http_current_tasks")
                    try:
                        context["_http_active_requests"].remove(task)
                    except KeyError:
                        pass
                    return task.result()
                except Exception:
                    decrease_execution_context_value("http_current_tasks")
                    try:
                        context["_http_active_requests"].remove(task)
                    except KeyError:
                        pass
                    raise
            except Exception:
                decrease_execution_context_value("http_current_tasks")
                try:
                    context["_http_active_requests"].remove(task)
                except KeyError:
                    pass
                raise
            except BaseException:
                decrease_execution_context_value("http_current_tasks")
                try:
                    context["_http_active_requests"].remove(task)
                except KeyError:
                    pass
                raise
            decrease_execution_context_value("http_current_tasks")
            try:
                context["_http_active_requests"].remove(task)
            except KeyError:
                pass
            return task.result()

        # Parse the client_max_size option ("10M", "1GB", "500K", bytes, ...);
        # default is 100 MiB.
        client_max_size_option = http_options.client_max_size
        client_max_size_option_str = str(client_max_size_option).upper()
        client_max_size = (1024**2) * 100
        try:
            if (
                client_max_size_option
                and isinstance(client_max_size_option, str)
                and (client_max_size_option_str.endswith("G") or client_max_size_option_str.endswith("GB"))
            ):
                client_max_size = int(
                    re.sub(cast(str, r"^([0-9]+)GB?$"), cast(str, r"\1"), client_max_size_option_str)
                ) * (1024**3)
            elif (
                client_max_size_option
                and isinstance(client_max_size_option, str)
                and (client_max_size_option_str.endswith("M") or client_max_size_option_str.endswith("MB"))
            ):
                client_max_size = int(re.sub(r"^([0-9]+)MB?$", r"\1", client_max_size_option_str)) * (1024**2)
            elif (
                client_max_size_option
                and isinstance(client_max_size_option, str)
                and (client_max_size_option_str.endswith("K") or client_max_size_option_str.endswith("KB"))
            ):
                client_max_size = int(re.sub(r"^([0-9]+)KB?$", r"\1", client_max_size_option_str)) * 1024
            elif (
                client_max_size_option
                and isinstance(client_max_size_option, str)
                and (client_max_size_option_str.endswith("B"))
            ):
                client_max_size = int(re.sub(r"^([0-9]+)B?$", r"\1", client_max_size_option_str))
            elif client_max_size_option:
                client_max_size = int(client_max_size_option)
        except Exception:
            raise ValueError(
                "Bad value for http option client_max_size: {}".format(str(client_max_size_option))
            ) from None
        if client_max_size >= 0 and client_max_size < 1024:
            raise ValueError(
                "Too low value for http option client_max_size: {} ({})".format(
                    str(client_max_size_option), client_max_size_option
                )
            )
        if client_max_size > 1024**3:
            raise ValueError(
                "Too high value for http option client_max_size: {} ({})".format(
                    str(client_max_size_option), client_max_size_option
                )
            )
        app: web.Application = web.Application(middlewares=[middleware], client_max_size=client_max_size)
        app._set_loop(None)
        # Register every collected route; GET routes also answer HEAD.
        for method, pattern, handler, route_context in context.get("_http_routes", []):
            try:
                compiled_pattern = re.compile(pattern)
            except re.error as exc:
                raise ValueError("Bad http route pattern '{}': {}".format(pattern, exc)) from None
            ignore_logging = route_context.get("ignore_logging", False)
            setattr(handler, "ignore_logging", ignore_logging)
            resource = DynamicResource(compiled_pattern)
            app.router.register_resource(resource)
            if method.upper() == "GET":
                resource.add_route("HEAD", handler, expect_handler=None)
            resource.add_route(method.upper(), handler, expect_handler=None)
        context["_http_accept_new_requests"] = True
        port = http_options.port
        host = http_options.host
        if port is True:
            raise ValueError("Bad value for http option port: {}".format(str(port)))
        # Configuration settings for keep-alive could use some refactoring
        keepalive_timeout_option = http_options.keepalive_timeout or http_options.keepalive_expiry
        keepalive_timeout = 0
        if keepalive_timeout_option is None or keepalive_timeout_option is False:
            keepalive_timeout_option = 0
        if keepalive_timeout_option is True:
            raise ValueError(
                "Bad value for http option keepalive_timeout: {}".format(str(keepalive_timeout_option))
            )
        try:
            keepalive_timeout = int(keepalive_timeout_option) if keepalive_timeout_option is not None else 0
        except Exception:
            raise ValueError(
                "Bad value for http option keepalive_timeout: {}".format(str(keepalive_timeout_option))
            ) from None
        tcp_keepalive = False
        if keepalive_timeout > 0:
            tcp_keepalive = True
        else:
            tcp_keepalive = False
            keepalive_timeout = 0
        max_keepalive_requests_option = http_options.max_keepalive_requests
        max_keepalive_requests = None
        if max_keepalive_requests_option is None or max_keepalive_requests_option is False:
            max_keepalive_requests_option = None
        if max_keepalive_requests_option is True:
            raise ValueError(
                "Bad value for http option max_keepalive_requests: {}".format(str(max_keepalive_requests_option))
            )
        try:
            if max_keepalive_requests_option is not None:
                max_keepalive_requests = int(max_keepalive_requests_option)
                if max_keepalive_requests == 0:
                    max_keepalive_requests = None
        except Exception:
            raise ValueError(
                "Bad value for http option max_keepalive_requests: {}".format(str(max_keepalive_requests_option))
            ) from None
        if not tcp_keepalive and max_keepalive_requests:
            raise ValueError(
                "HTTP keep-alive must be enabled to use http option max_keepalive_requests - a http.keepalive_timeout option value is required"
            ) from None
        max_keepalive_time_option = http_options.max_keepalive_time
        max_keepalive_time = None
        if max_keepalive_time_option is None or max_keepalive_time_option is False:
            max_keepalive_time_option = None
        if max_keepalive_time_option is True:
            raise ValueError(
                "Bad value for http option max_keepalive_time: {}".format(str(max_keepalive_time_option))
            )
        try:
            if max_keepalive_time_option is not None:
                max_keepalive_time = int(max_keepalive_time_option)
                if max_keepalive_time == 0:
                    max_keepalive_time = None
        except Exception:
            raise ValueError(
                "Bad value for http option max_keepalive_time: {}".format(str(max_keepalive_time_option))
            ) from None
        if not tcp_keepalive and max_keepalive_time:
            raise ValueError(
                "HTTP keep-alive must be enabled to use http option max_keepalive_time - a http.keepalive_timeout option value is required"
            ) from None
        reuse_port = True if http_options.reuse_port else False
        if reuse_port and platform.system() != "Linux":
            # SO_REUSEPORT is only honoured on Linux in this transport.
            http_logger.warning(
                "The http option reuse_port (socket.SO_REUSEPORT) can only enabled on Linux platforms - current "
                f"platform is {platform.system()} - will revert option setting to not reuse ports"
            )
            reuse_port = False
        context["_http_tcp_keepalive"] = tcp_keepalive
        context["_http_keepalive_timeout"] = keepalive_timeout
        context["_http_max_keepalive_requests"] = max_keepalive_requests
        context["_http_max_keepalive_time"] = max_keepalive_time
        set_execution_context(
            {
                "http_enabled": True,
                "http_current_tasks": 0,
                "http_total_tasks": 0,
                "aiohttp_version": aiohttp_version,
            }
        )
        try:
            app.freeze()
            web_server = Server(
                app._handle,
                request_factory=app._make_request,
                server_header=server_header or "",
                access_log=access_log,
                keepalive_timeout=keepalive_timeout,
                tcp_keepalive=tcp_keepalive,
            )
            if reuse_port:
                if not port:
                    http_logger.warning(
                        "The http option reuse_port (socket option SO_REUSEPORT) is enabled by default on Linux - "
                        "listening on random ports with SO_REUSEPORT is dangerous - please double check your intent"
                    )
                elif str(port) in HttpTransport.server_port_mapping.values():
                    http_logger.warning(
                        "The http option reuse_port (socket option SO_REUSEPORT) is enabled by default on Linux - "
                        "different service classes should not use the same port ({})".format(port)
                    )
            if port:
                HttpTransport.server_port_mapping[web_server] = str(port)
            server_task = loop.create_server(web_server, host, port, reuse_port=reuse_port)
            server = await server_task
        except OSError as e:
            context["_http_accept_new_requests"] = False
            error_message = re.sub(".*: ", "", e.strerror)
            http_logger.warning(
                "Unable to bind service [http] to http://{}:{}/ ({})".format(
                    "127.0.0.1" if host == "0.0.0.0" else host, port, error_message
                )
            )
            raise HttpException(str(e), log_level=context.get("log_level")) from e
        if server.sockets:
            # Resolve the actual bound port (relevant when port 0 was requested).
            socket_address = server.sockets[0].getsockname()
            if socket_address:
                port = int(socket_address[1])
                HttpTransport.server_port_mapping[web_server] = str(port)
        context["_http_port"] = port
        stop_method = getattr(obj, "_stop_service", None)

        async def stop_service(*args: Any, **kwargs: Any) -> None:
            # Graceful shutdown: stop accepting, close websockets, wait for
            # in-flight requests within the grace period, then tear down.
            context["_http_tcp_keepalive"] = False
            server.close()
            await server.wait_closed()
            HttpTransport.server_port_mapping.pop(web_server, None)
            shutdown_sleep = 0
            if len(web_server.connections):
                shutdown_sleep = 1
                await asyncio.sleep(1)
            if not tcp_keepalive:
                context["_http_accept_new_requests"] = False
            open_websockets = context.get("_http_open_websockets", [])[:]
            if open_websockets:
                http_logger.info("Closing {} websocket connection(s)".format(len(open_websockets)))
                tasks = []
                for websocket in open_websockets:
                    try:
                        tasks.append(asyncio.ensure_future(websocket.close()))
                    except Exception:
                        pass
                try:
                    await asyncio.wait_for(
                        asyncio.shield(asyncio.gather(*tasks, return_exceptions=True)), timeout=10
                    )
                    await asyncio.sleep(1)
                except (Exception, asyncio.TimeoutError, asyncio.CancelledError):
                    pass
                context["_http_open_websockets"] = []
            termination_grace_period_seconds = 30
            try:
                termination_grace_period_seconds = int(
                    http_options.termination_grace_period_seconds or termination_grace_period_seconds
                )
            except Exception:
                pass
            log_wait_message = True if termination_grace_period_seconds >= 2 else False
            if tcp_keepalive and len(web_server.connections):
                wait_start_time = time.time()
                while wait_start_time + max(2, termination_grace_period_seconds) > time.time():
                    active_requests = context.get("_http_active_requests", set())
                    if not active_requests and not len(web_server.connections):
                        break
                    if log_wait_message:
                        log_wait_message = False
                        if len(web_server.connections) and len(web_server.connections) != len(active_requests):
                            http_logger.info(
                                "Waiting for {} keep-alive connection(s) to close".format(
                                    len(web_server.connections)
                                )
                            )
                        if active_requests:
                            http_logger.info(
                                "Waiting for {} active request(s) to complete - grace period of {} seconds".format(
                                    len(active_requests), termination_grace_period_seconds
                                )
                            )
                    await asyncio.sleep(0.25)
                termination_grace_period_seconds -= int(time.time() - wait_start_time)
            context["_http_accept_new_requests"] = False
            active_requests = context.get("_http_active_requests", set())
            if active_requests:
                if log_wait_message:
                    http_logger.info(
                        "Waiting for {} active request(s) to complete - grace period of {} seconds".format(
                            len(active_requests), termination_grace_period_seconds
                        )
                    )
                try:
                    await asyncio.wait_for(
                        asyncio.shield(asyncio.gather(*active_requests, return_exceptions=True)),
                        timeout=max(2, termination_grace_period_seconds),
                    )
                    await asyncio.sleep(1)
                    active_requests = context.get("_http_active_requests", set())
                except (Exception, asyncio.TimeoutError, asyncio.CancelledError):
                    active_requests = context.get("_http_active_requests", set())
                    if active_requests:
                        http_logger.warning(
                            "All requests did not gracefully finish execution - {} request(s) remaining".format(
                                len(active_requests)
                            )
                        )
                context["_http_active_requests"] = set()
            if shutdown_sleep > 0:
                await asyncio.sleep(shutdown_sleep)
            if len(web_server.connections):
                http_logger.warning(
                    "The remaining {} open TCP connections will be forcefully closed".format(
                        len(web_server.connections)
                    )
                )
                await app.shutdown()
                await asyncio.sleep(1)
            else:
                await app.shutdown()
            if logger_handler:
                http_logger.removeHandler(logger_handler)
            await app.cleanup()
            if stop_method:
                # Chain to any previously-installed stop handler.
                await stop_method(*args, **kwargs)

        setattr(obj, "_stop_service", stop_service)
        # Announce registered endpoints to any configured discovery registries.
        for method, pattern, handler, route_context in context.get("_http_routes", []):
            for registry in getattr(obj, "discovery", []):
                if getattr(registry, "add_http_endpoint", None):
                    await registry.add_http_endpoint(obj, host, port, method, pattern)
        http_logger.info(
            "Listening [http] on http://{}:{}/".format("127.0.0.1" if host == "0.0.0.0" else host, port)
        )

    return _start_server
async def resolve_response(
    value: Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response],
    request: Optional[web.Request] = None,
    context: Optional[Dict] = None,
    status_code: Optional[Union[str, int]] = None,
    default_content_type: Optional[str] = None,
    default_charset: Optional[str] = None,
) -> Union[web.Response, web.FileResponse]:
    """Async-compatible wrapper that delegates to ``resolve_response_sync``.

    Kept as a coroutine so call sites may ``await`` it; the actual resolution
    is synchronous.
    """
    resolved = resolve_response_sync(
        value,
        request=request,
        context=context,
        status_code=status_code,
        default_content_type=default_content_type,
        default_charset=default_charset,
    )
    return resolved
def resolve_response_sync(
    value: Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response],
    request: Optional[web.Request] = None,
    context: Optional[Dict] = None,
    status_code: Optional[Union[str, int]] = None,
    default_content_type: Optional[str] = None,
    default_charset: Optional[str] = None,
) -> Union[web.Response, web.FileResponse]:
    """Normalize a handler return value into an aiohttp response.

    Accepts an already-built ``Response``/``web.Response``/``web.FileResponse``
    (returned as-is or converted), a dict with optional ``body``/``status``/
    ``headers`` keys, a ``(status, body[, headers])`` list or tuple, or a plain
    body value. When no explicit status is given, a cached
    ``error_status_code`` on the request (if any) is used, else 200.
    """
    ctx = context if context else {}
    # Pass-through cases first: pre-built response objects.
    if isinstance(value, Response):
        return value.get_aiohttp_response(
            ctx, default_content_type=default_content_type, default_charset=default_charset
        )
    if isinstance(value, web.FileResponse):
        return value

    if status_code:
        status = int(status_code)
    else:
        status = (request is not None and request._cache.get("error_status_code", 200)) or 200

    headers = None
    if isinstance(value, dict):
        body = value.get("body")
        raw_status: Optional[SupportsInt] = value.get("status")
        if raw_status and isinstance(raw_status, (int, str, bytes)):
            status = int(raw_status)
        header_values = value.get("headers")
        if header_values:
            returned_headers: Union[Mapping[str, Any], Iterable[Tuple[str, Any]]] = header_values
            headers = CIMultiDict(returned_headers)
    elif isinstance(value, (list, tuple)):
        raw_status = value[0]
        if raw_status and isinstance(raw_status, (int, str, bytes)):
            status = int(raw_status)
        body = value[1]
        if len(value) > 2:
            returned_headers = value[2]
            headers = CIMultiDict(returned_headers)
    elif isinstance(value, web.Response):
        return value
    else:
        # Bare body value; None is rendered as an empty string.
        body = "" if value is None else value

    return Response(
        body=body, status=status, headers=headers, content_type=default_content_type, charset=default_charset
    ).get_aiohttp_response(ctx)
async def get_http_response_status(
    value: Union[str, bytes, Dict, List, Tuple, web.Response, web.FileResponse, Response, Exception],
    request: Optional[web.Request] = None,
    verify_transport: bool = True,
) -> Optional[int]:
    """Return the HTTP status code a handler return value would produce.

    Exceptions map to their ``status`` attribute (default 500). Any other
    value is resolved to a response first. When ``verify_transport`` is set
    and the request's transport is gone, 499 is reported instead.
    """
    if isinstance(value, (Exception, web.HTTPException)):
        return int(getattr(value, "status", 500)) if value is not None else 500
    response = resolve_response_sync(value, request=request)
    resolved_status = int(response.status) if response is not None else 500
    if verify_transport and request is not None and request.transport is None:
        return 499
    return resolved_status
def get_http_response_status_sync(
    value: Any, request: Optional[web.Request] = None, verify_transport: bool = True
) -> Optional[int]:
    """Best-effort status-code extraction without building a response.

    Checks, in order: exception status, broken transport (499), explicit
    ``_status``/``status`` on response objects, ``status`` entries in dict or
    list/tuple return values, then generic ``_status``/``status`` attributes.
    Falls back to the request's cached ``error_status_code`` or 200.
    """
    if isinstance(value, (Exception, web.HTTPException)):
        return int(getattr(value, "status", 500)) if value is not None else 500
    if verify_transport and request is not None and hasattr(request, "transport") and request.transport is None:
        return 499
    if isinstance(value, Response) and value._status:
        return int(value._status)
    if isinstance(value, (web.Response, web.FileResponse)) and value.status:
        return int(value.status)
    if isinstance(value, dict):
        raw_status: Optional[SupportsInt] = value.get("status")
        if raw_status and isinstance(raw_status, (int, str, bytes)):
            return int(raw_status)
    elif isinstance(value, (list, tuple)):
        raw_status = value[0]
        if raw_status and isinstance(raw_status, (int, str, bytes)):
            return int(raw_status)
    elif value and getattr(value, "_status", None):
        return int(getattr(value, "_status"))
    elif value and getattr(value, "status", None):
        return int(getattr(value, "status"))
    return int((request is not None and request._cache.get("error_status_code", 200)) or 200)
# Module-private decorator factories bound from HttpTransport; the public
# helpers below (http, http_error, http_static, websocket, ws) wrap these.
__http = HttpTransport.decorator(HttpTransport.request_handler)
__http_error = HttpTransport.decorator(HttpTransport.error_handler)
__http_static = HttpTransport.decorator(HttpTransport.static_request_handler)
# Two aliases for the same websocket decorator.
__websocket = HttpTransport.decorator(HttpTransport.websocket_handler)
__ws = HttpTransport.decorator(HttpTransport.websocket_handler)
def http(
    method: Union[str, List[str], Tuple[str, ...]],
    url: str,
    *,
    ignore_logging: Union[bool, List[int], Tuple[int, ...]] = False,
    pre_handler_func: Optional[Callable] = None,
) -> Callable:
    """Decorator registering a handler for ``method`` requests matching ``url``.

    ``ignore_logging`` may be True or a collection of status codes to keep out
    of the access log; ``pre_handler_func`` runs before the handler.
    """
    decorator = __http(method, url, ignore_logging=ignore_logging, pre_handler_func=pre_handler_func)
    return cast(Callable, decorator)
def http_error(status_code: int) -> Callable:
    """Decorator factory registering a handler for error responses with ``status_code``."""
    decorated = __http_error(status_code)
    return cast(Callable, decorated)
def http_static(
    path: str, base_url: str, *, ignore_logging: Union[bool, List[int], Tuple[int, ...]] = False
) -> Callable:
    # Thin wrapper around the HttpTransport static file handler decorator.
    return cast(Callable, __http_static(path, base_url, ignore_logging=ignore_logging))
def websocket(url: str) -> Callable:
    """Decorator factory registering a service method as a websocket handler for ``url``."""
    decorated = __websocket(url)
    return cast(Callable, decorated)
def ws(url: str) -> Callable:
    # Short alias for the websocket decorator (bound to the same handler).
    return cast(Callable, __ws(url))
| mit | d04d8d067e53354adf4282273ab9086e | 43.979734 | 147 | 0.510891 | 4.753767 | false | false | false | false |
wireservice/agate | tests/test_table/test_print_table.py | 3 | 4657 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import six
from babel.numbers import get_decimal_symbol
from agate import Table
from agate.data_types import Number, Text
from agate.testcase import AgateTestCase
class TestPrintTable(AgateTestCase):
    """Tests for :meth:`.Table.print_table` text rendering: row/column limits,
    precision truncation, column-width truncation and locale-aware number
    formatting."""

    def setUp(self):
        # Shared fixture: one text-number column plus numbers in default,
        # en_US and de_DE locales, and a text column.
        self.rows = (
            ('1.7', 2000, 2000, 'a'),
            ('11.18', None, None, None),
            ('0', 1, 1, 'c')
        )

        self.number_type = Number()
        self.american_number_type = Number(locale='en_US')
        self.german_number_type = Number(locale='de_DE.UTF-8')
        self.text_type = Text()

        self.column_names = ['one', 'two', 'three', 'four']
        self.column_types = [
            self.number_type,
            self.american_number_type,
            self.german_number_type,
            self.text_type
        ]

    def test_print_table(self):
        # Baseline: 3 data rows render as 6 lines (header, rule, rows, trailing).
        table = Table(self.rows, self.column_names, self.column_types)

        output = six.StringIO()
        table.print_table(output=output)
        lines = output.getvalue().split('\n')

        self.assertEqual(len(lines), 6)
        self.assertEqual(len(lines[0]), 32)

    def test_print_table_max_rows(self):
        # Truncating to 2 rows keeps the line count (an ellipsis row is added).
        table = Table(self.rows, self.column_names, self.column_types)

        output = six.StringIO()
        table.print_table(max_rows=2, output=output)
        lines = output.getvalue().split('\n')

        self.assertEqual(len(lines), 6)
        self.assertEqual(len(lines[0]), 32)

    def test_print_table_max_columns(self):
        # Truncating to 2 columns narrows the rendered width.
        table = Table(self.rows, self.column_names, self.column_types)

        output = six.StringIO()
        table.print_table(max_columns=2, output=output)
        lines = output.getvalue().split('\n')

        self.assertEqual(len(lines), 6)
        self.assertEqual(len(lines[0]), 23)

    def test_print_table_max_precision(self):
        rows = (
            ('1.745', 1.745, 1.72),
            ('11.123456', 11.123456, 5.10),
            ('0', 0, 0.10)
        )

        column_names = ['text_number', 'real_long_number', 'real_short_number']
        column_types = [
            self.text_type,
            self.number_type,
            self.number_type
        ]
        table = Table(rows, column_names, column_types)

        output = six.StringIO()
        table.print_table(output=output, max_precision=2)
        lines = output.getvalue().split('\n')

        # Text shouldn't be affected
        self.assertIn(u' 1.745 ', lines[2])
        self.assertIn(u' 11.123456 ', lines[3])
        self.assertIn(u' 0 ', lines[4])
        # Test real precision above max
        self.assertIn(u' 1' + get_decimal_symbol() + u'74… ', lines[2])
        self.assertIn(u' 11' + get_decimal_symbol() + u'12… ', lines[3])
        self.assertIn(u' 0' + get_decimal_symbol() + u'00… ', lines[4])
        # Test real precision below max
        self.assertIn(u' 1' + get_decimal_symbol() + u'72 ', lines[2])
        self.assertIn(u' 5' + get_decimal_symbol() + u'10 ', lines[3])
        self.assertIn(u' 0' + get_decimal_symbol() + u'10 ', lines[4])

    def test_print_table_max_column_width(self):
        # Values and headers longer than the limit are truncated with "...".
        rows = (
            ('1.7', 2, 2, 'this is long'),
            ('11.18', None, None, None),
            ('0', 1, 1, 'nope')
        )

        column_names = ['one', 'two', 'three', 'also, this is long']
        table = Table(rows, column_names, self.column_types)

        output = six.StringIO()
        table.print_table(output=output, max_column_width=7)
        lines = output.getvalue().split('\n')

        self.assertIn(' also... ', lines[0])
        self.assertIn(' this... ', lines[2])
        self.assertIn(' nope ', lines[4])

    def test_print_table_locale_american(self):
        """
        Verify that an explicit en_US print-time locale controls the number
        formatting, overriding the column's own locale.
        """
        table = Table(self.rows, self.column_names, self.column_types)
        output = six.StringIO()
        table.print_table(max_columns=2, output=output, locale='en_US')
        # If it's working, 2000 should appear as the english '2,000'
        self.assertTrue("2,000" in output.getvalue())

    def test_print_table_locale_german(self):
        """
        Verify that an explicit de_DE print-time locale controls the number
        formatting, overriding the column's own locale.
        """
        table = Table(self.rows, self.column_names, self.column_types)
        output = six.StringIO()
        table.print_table(max_columns=2, output=output, locale='de_DE.UTF-8')
        # If it's working, the english '2,000' should appear as '2.000'
        self.assertTrue("2.000" in output.getvalue())
| mit | fb5af9a8c29fc435a8cf73069e90bbe0 | 33.451852 | 79 | 0.57235 | 3.510189 | false | true | false | false |
wireservice/agate | agate/table/rename.py | 3 | 2489 | #!/usr/bin/env python
# pylint: disable=W0212
from agate import utils
def rename(self, column_names=None, row_names=None, slug_columns=False, slug_rows=False, **kwargs):
    """
    Create a copy of this table with different column names or row names.

    By enabling :code:`slug_columns` or :code:`slug_rows` and not specifying
    new names you may slugify the table's existing names.

    :code:`kwargs` will be passed to the slugify method in python-slugify. See:
    https://github.com/un33k/python-slugify

    :param column_names:
        New column names for the renamed table. May be either an array or
        a dictionary mapping existing column names to new names. If not
        specified, will use this table's existing column names.
    :param row_names:
        New row names for the renamed table. May be either an array or
        a dictionary mapping existing row names to new names. If not
        specified, will use this table's existing row names.
    :param slug_columns:
        If True, column names will be converted to slugs and duplicate names
        will have unique identifiers appended.
    :param slug_rows:
        If True, row names will be converted to slugs and dupicate names will
        have unique identifiers appended.
    """
    from agate.table import Table

    # Dict arguments map old-name -> new-name; expand them into a full list,
    # keeping any unmapped names unchanged.
    if isinstance(column_names, dict):
        column_names = [column_names[name] if name in column_names else name for name in self._column_names]

    if isinstance(row_names, dict):
        row_names = [row_names[name] if name in row_names else name for name in self._row_names]

    if slug_columns:
        column_names = column_names or self._column_names

        if column_names is not None:
            # NOTE(review): uniqueness is only enforced when caller-supplied
            # names are being slugified; slugifying the table's own names
            # skips it -- confirm existing names cannot collide post-slug.
            if column_names == self._column_names:
                column_names = utils.slugify(column_names, ensure_unique=False, **kwargs)
            else:
                column_names = utils.slugify(column_names, ensure_unique=True, **kwargs)

    if slug_rows:
        row_names = row_names or self.row_names

        if row_names is not None:
            row_names = utils.slugify(row_names, ensure_unique=True, **kwargs)

    # Changed column names require constructing a fresh Table; otherwise a
    # cheaper fork (which shares column metadata) is sufficient.
    if column_names is not None and column_names != self._column_names:
        if row_names is None:
            row_names = self._row_names

        return Table(self._rows, column_names, self._column_types, row_names=row_names, _is_fork=False)
    else:
        return self._fork(self._rows, column_names, self._column_types, row_names=row_names)
| mit | c682c0d618328d1ee1e9d5d5f80be37c | 39.803279 | 108 | 0.669345 | 3.944532 | false | false | false | false |
wireservice/agate | tests/test_table/test_pivot.py | 3 | 7880 | #!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
try:
from cdecimal import Decimal
except ImportError: # pragma: no cover
from decimal import Decimal
from agate import Table
from agate.aggregations import Sum
from agate.computations import Percent
from agate.data_types import Number, Text
from agate.testcase import AgateTestCase
class TestPivot(AgateTestCase):
    """Tests for :meth:`.Table.pivot`: key/pivot combinations, lambda keys,
    custom aggregations, default values and post-pivot computations."""

    def setUp(self):
        # Demographic fixture shared by all tests.
        self.rows = (
            ('joe', 'white', 'male', 20, 'blue'),
            ('jane', 'white', 'female', 20, 'blue'),
            ('josh', 'black', 'male', 20, 'blue'),
            ('jim', 'latino', 'male', 25, 'blue'),
            ('julia', 'white', 'female', 25, 'green'),
            ('joan', 'asian', 'female', 25, 'green')
        )

        self.number_type = Number()
        self.text_type = Text()

        self.column_names = ['name', 'race', 'gender', 'age', 'color']
        self.column_types = [self.text_type, self.text_type, self.text_type, self.number_type, self.text_type]

    def test_pivot(self):
        # Row key + pivot column produce a crosstab of counts.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot('race', 'gender')

        pivot_rows = (
            ('white', 1, 2),
            ('black', 1, 0),
            ('latino', 1, 0),
            ('asian', 0, 1)
        )

        self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
        self.assertRowNames(pivot_table, ['white', 'black', 'latino', 'asian'])
        self.assertColumnTypes(pivot_table, [Text, Number, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_by_lambda(self):
        # A callable key groups rows by its return value; default key name is "group".
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot(lambda r: r['gender'])

        pivot_rows = (
            ('male', 3),
            ('female', 3)
        )

        self.assertColumnNames(pivot_table, ['group', 'Count'])
        self.assertRowNames(pivot_table, ['male', 'female'])
        self.assertColumnTypes(pivot_table, [Text, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_by_lambda_group_name(self):
        # key_name overrides the default "group" column name.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot(lambda r: r['gender'], key_name='gender')

        pivot_rows = (
            ('male', 3),
            ('female', 3)
        )

        self.assertColumnNames(pivot_table, ['gender', 'Count'])
        self.assertRowNames(pivot_table, ['male', 'female'])
        self.assertColumnTypes(pivot_table, [Text, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_by_lambda_group_name_sequence_invalid(self):
        # key_name is not allowed together with a sequence of keys.
        table = Table(self.rows, self.column_names, self.column_types)

        with self.assertRaises(ValueError):
            table.pivot(['race', 'gender'], key_name='foo')

    def test_pivot_no_key(self):
        # Pivot without a key yields a single row of per-pivot-value counts.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot(pivot='gender')

        pivot_rows = (
            (3, 3),
        )

        self.assertColumnNames(pivot_table, ['male', 'female'])
        self.assertColumnTypes(pivot_table, [Number, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_no_pivot(self):
        # Key without a pivot behaves like a grouped count.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot('race')

        pivot_rows = (
            ('white', 3),
            ('black', 1),
            ('latino', 1),
            ('asian', 1)
        )

        self.assertColumnNames(pivot_table, ['race', 'Count'])
        self.assertColumnTypes(pivot_table, [Text, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_sum(self):
        # A custom aggregation replaces the default Count.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot('race', 'gender', Sum('age'))

        pivot_rows = (
            ('white', 20, 45),
            ('black', 20, 0),
            ('latino', 25, 0),
            ('asian', 0, 25)
        )

        self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
        self.assertColumnTypes(pivot_table, [Text, Number, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_multiple_keys(self):
        # A sequence of keys produces compound row names.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot(['race', 'gender'], 'age')

        pivot_rows = (
            ('white', 'male', 1, 0),
            ('white', 'female', 1, 1),
            ('black', 'male', 1, 0),
            ('latino', 'male', 0, 1),
            ('asian', 'female', 0, 1),
        )

        self.assertRows(pivot_table, pivot_rows)
        self.assertColumnNames(pivot_table, ['race', 'gender', '20', '25'])
        self.assertRowNames(pivot_table, [
            ('white', 'male'),
            ('white', 'female'),
            ('black', 'male'),
            ('latino', 'male'),
            ('asian', 'female'),
        ])
        self.assertColumnTypes(pivot_table, [Text, Text, Number, Number])

    def test_pivot_multiple_keys_no_pivot(self):
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot(['race', 'gender'])

        pivot_rows = (
            ('white', 'male', 1),
            ('white', 'female', 2),
            ('black', 'male', 1),
            ('latino', 'male', 1),
            ('asian', 'female', 1),
        )

        self.assertRows(pivot_table, pivot_rows)
        self.assertColumnNames(pivot_table, ['race', 'gender', 'Count'])
        self.assertColumnTypes(pivot_table, [Text, Text, Number])

    def test_pivot_default_value(self):
        # default_value fills cells that have no matching rows (instead of 0).
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot('race', 'gender', default_value=None)

        pivot_rows = (
            ('white', 1, 2),
            ('black', 1, None),
            ('latino', 1, None),
            ('asian', None, 1)
        )

        self.assertColumnNames(pivot_table, ['race', 'male', 'female'])
        self.assertColumnTypes(pivot_table, [Text, Number, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_compute(self):
        # A computation transforms the aggregated column after pivoting.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot('gender', computation=Percent('Count'))

        pivot_table.print_table(output=sys.stdout)

        pivot_rows = (
            ('male', Decimal(50)),
            ('female', Decimal(50)),
        )

        self.assertColumnNames(pivot_table, ['gender', 'Percent'])
        self.assertColumnTypes(pivot_table, [Text, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_compute_pivots(self):
        # Computation is applied across all pivoted columns.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot('gender', 'color', computation=Percent('Count'))

        pivot_table.print_table(output=sys.stdout)

        pivot_rows = (
            ('male', Decimal(50), 0),
            ('female', Decimal(1) / Decimal(6) * Decimal(100), Decimal(1) / Decimal(3) * Decimal(100)),
        )

        self.assertColumnNames(pivot_table, ['gender', 'blue', 'green'])
        self.assertColumnTypes(pivot_table, [Text, Number, Number])
        self.assertRows(pivot_table, pivot_rows)

    def test_pivot_compute_kwargs(self):
        # Extra computation kwargs (here an explicit total) are honored.
        table = Table(self.rows, self.column_names, self.column_types)

        pivot_table = table.pivot('gender', 'color', computation=Percent('Count', total=8))

        pivot_table.print_table(output=sys.stdout)

        pivot_rows = (
            ('male', Decimal(3) / Decimal(8) * Decimal(100), 0),
            ('female', Decimal(1) / Decimal(8) * Decimal(100), Decimal(2) / Decimal(8) * Decimal(100)),
        )

        self.assertColumnNames(pivot_table, ['gender', 'blue', 'green'])
        self.assertColumnTypes(pivot_table, [Text, Number, Number])
        self.assertRows(pivot_table, pivot_rows)
| mit | f27a2ee6f49814b95b15e7d253cf2bbf | 32.389831 | 110 | 0.562817 | 3.565611 | false | true | false | false |
kalaspuff/tomodachi | tomodachi/envelope/json_base.py | 1 | 2554 | import base64
import json
import time
import uuid
import zlib
from typing import Any, Dict, Tuple, Union
PROTOCOL_VERSION = "tomodachi-json-base--1.0.0"


class JsonBase(object):
    """Default JSON envelope used to wrap published messages.

    ``build_message`` wraps a payload in a metadata envelope and serializes
    it to JSON; ``parse_message`` is the inverse, returning the envelope dict
    together with the message uuid and timestamp.
    """

    @classmethod
    async def build_message(cls, service: Any, topic: str, data: Any, **kwargs: Any) -> str:
        """Serialize ``data`` for ``topic`` into an envelope JSON string.

        Payloads whose JSON form is 60000 characters or larger are
        gzip-compressed and base64-encoded, and marked with
        ``data_encoding = "base64_gzip_json"``; smaller payloads are embedded
        as-is with ``data_encoding = "raw"``.
        """
        data_encoding = "raw"
        # Serialize once and reuse -- previously the payload was re-serialized
        # for both the size check and the compression step.
        serialized_data = json.dumps(data)
        if len(serialized_data) >= 60000:
            data = base64.b64encode(zlib.compress(serialized_data.encode("utf-8"))).decode("utf-8")
            data_encoding = "base64_gzip_json"

        message = {
            "service": {"name": getattr(service, "name", None), "uuid": getattr(service, "uuid", None)},
            "metadata": {
                "message_uuid": "{}.{}".format(getattr(service, "uuid", ""), str(uuid.uuid4())),
                "protocol_version": PROTOCOL_VERSION,
                "compatible_protocol_versions": ["json_base-wip"],  # deprecated
                "timestamp": time.time(),
                "topic": topic,
                "data_encoding": data_encoding,
            },
            "data": data,
        }
        return json.dumps(message)

    @classmethod
    async def parse_message(cls, payload: str, **kwargs: Any) -> Union[Dict, Tuple]:
        """Parse an envelope JSON string.

        :returns: tuple of (envelope dict, message_uuid, timestamp).
        """
        message = json.loads(payload)

        message_uuid = message.get("metadata", {}).get("message_uuid")
        timestamp = message.get("metadata", {}).get("timestamp")

        data = None
        if message.get("metadata", {}).get("data_encoding") == "raw":
            data = message.get("data")
        elif message.get("metadata", {}).get("data_encoding") == "base64_gzip_json":
            # Reverse of build_message's large-payload path.
            data = json.loads(zlib.decompress(base64.b64decode(message.get("data").encode("utf-8"))).decode("utf-8"))

        return (
            {
                "service": {
                    "name": message.get("service", {}).get("name"),
                    "uuid": message.get("service", {}).get("uuid"),
                },
                "metadata": {
                    "message_uuid": message.get("metadata", {}).get("message_uuid"),
                    "protocol_version": message.get("metadata", {}).get("protocol_version"),
                    "timestamp": message.get("metadata", {}).get("timestamp"),
                    "topic": message.get("metadata", {}).get("topic"),
                    "data_encoding": message.get("metadata", {}).get("data_encoding"),
                },
                "data": data,
            },
            message_uuid,
            timestamp,
        )
__all__ = [
"PROTOCOL_VERSION",
"JsonBase",
]
| mit | fa41b3244e58cc52bbe40f527cf90f79 | 36.014493 | 117 | 0.516836 | 4.060413 | false | false | false | false |
kalaspuff/tomodachi | examples/basic_examples/websockets/websocket_service.py | 1 | 1893 | import os
import uuid as uuid_
from typing import Callable, Tuple, Union
from aiohttp import web
from aiohttp.web_fileresponse import FileResponse
import tomodachi
from tomodachi import Options, http, http_error, http_static, websocket
class ExampleWebsocketService(tomodachi.Service):
    """Example tomodachi service that serves an index page, static files under
    /public and a websocket endpoint that echoes a uuid per received message."""
    name = "example-websocket-service"
    log_level = "DEBUG"
    uuid = str(os.environ.get("SERVICE_UUID") or "")

    options = Options(
        http=Options.HTTP(
            port=4711,
            content_type="text/plain; charset=utf-8",
            access_log=True,
        ),
    )

    @http("GET", r"/(?:|index.html)")
    async def index(self, request: web.Request) -> web.Response:
        # Serve public/index.html, resolved relative to this service's source file.
        path = "{}/{}".format(
            os.path.dirname(self.context.get("context", {}).get("_service_file_path")), "public/index.html"
        )
        response: web.Response = FileResponse(path=path, chunk_size=256 * 1024)
        return response

    @http_static("public/", r"/public")
    async def public(self) -> None:
        # Handler body intentionally empty: the decorator serves the files.
        pass

    @websocket(r"/websocket/?")
    async def websocket_connection(self, websocket: web.WebSocketResponse) -> Tuple[Callable, Callable]:
        # Called when a websocket client is connected
        self.log("websocket client connected")

        async def _receive(data: Union[str, bytes]) -> None:
            # Called when the websocket receives data
            self.log("websocket data received: {}".format(data))
            await websocket.send_str("response {}".format(str(uuid_.uuid4())))

        async def _close() -> None:
            # Called when the websocket is closed by the other end
            self.log("websocket closed")

        # Receiving function and closure function returned as tuple
        return _receive, _close
| mit | 35de95a35ad615f5980cdf6b8b77bc12 | 32.803571 | 107 | 0.632858 | 4.02766 | false | false | false | false |
kalaspuff/tomodachi | examples/basic_examples/http_auth_service.py | 1 | 1208 | import os
import uuid as uuid_
from typing import Any
from aiohttp import web
import tomodachi
from tomodachi import HttpResponse, Options, http
@tomodachi.decorator
async def require_auth_token(instance: Any, request: web.Request) -> Any:
    """Reject the request with 403 unless the POST body equals the service's allowed token.

    Returning nothing lets the wrapped handler run normally.
    """
    post_body = await request.read() if request.body_exists else None
    if not post_body or post_body.decode() != instance.allowed_token:
        return HttpResponse(body="Invalid token", status=403)
class ExampleHttpAuthService(tomodachi.Service):
    """Example service demonstrating token-based auth via the
    ``require_auth_token`` decorator: fetch the token at /get-token, then POST
    it to /validate."""
    name = "example-http-auth-service"
    log_level = "DEBUG"
    uuid = str(os.environ.get("SERVICE_UUID") or "")
    # A fresh token is generated on every service start.
    allowed_token = str(uuid_.uuid4())

    # Some options can be specified to define credentials, used ports, hostnames, access log, etc.
    options = Options(
        http=Options.HTTP(
            port=4711,
            content_type="text/plain; charset=utf-8",
            access_log=True,
        ),
    )

    @http("GET", r"/get-token/?")
    async def get_token(self, request: web.Request) -> str:
        return self.allowed_token

    @http("POST", r"/validate/?")
    @require_auth_token
    async def validate(self, request: web.Request) -> str:
        return "Valid auth token!"
| mit | e6bd9884ffe08efa4d5c2517a37b1f70 | 28.463415 | 98 | 0.662252 | 3.660606 | false | false | false | false |
wireservice/agate | agate/table/to_csv.py | 4 | 1109 | #!/usr/bin/env python
# pylint: disable=W0212
import os
def to_csv(self, path, **kwargs):
    """
    Write this table to a CSV. This method uses agate's builtin CSV writer,
    which supports unicode on both Python 2 and Python 3.

    `kwargs` will be passed through to the CSV writer.

    :param path:
        Filepath or file-like object to write to.
    """
    from agate import csv

    if 'lineterminator' not in kwargs:
        kwargs['lineterminator'] = '\n'

    close = True
    f = None

    try:
        if hasattr(path, 'write'):
            # Caller supplied a file-like object: write to it but do not
            # close it -- the caller retains ownership.
            f = path
            close = False
        else:
            # Create intermediate directories as needed before opening.
            dirpath = os.path.dirname(path)

            if dirpath and not os.path.exists(dirpath):
                os.makedirs(dirpath)

            f = open(path, 'w')

        writer = csv.writer(f, **kwargs)
        writer.writerow(self._column_names)

        # Each column type provides its own CSV serialization function.
        csv_funcs = [c.csvify for c in self._column_types]

        for row in self._rows:
            writer.writerow(tuple(csv_funcs[i](d) for i, d in enumerate(row)))
    finally:
        if close and f is not None:
            f.close()
| mit | 9befada3799ec9f57da4e3507bd4b002 | 23.108696 | 78 | 0.569883 | 3.83737 | false | false | false | false |
wireservice/agate | agate/aggregations/summary.py | 4 | 1052 | #!/usr/bin/env python
from agate.aggregations.base import Aggregation
class Summary(Aggregation):
    """
    Apply an arbitrary function to a column.

    :param column_name:
        The name of a column to be summarized.
    :param data_type:
        The return type of this aggregation.
    :param func:
        A function which will be passed the column for processing.
    :param cast:
        If :code:`True`, each return value will be cast to the specified
        :code:`data_type` to ensure it is valid. Only disable this if you are
        certain your summary always returns the correct type.
    """
    def __init__(self, column_name, data_type, func, cast=True):
        self._column_name = column_name
        self._data_type = data_type
        self._func = func
        self._cast = cast

    def get_aggregate_data_type(self, table):
        # The result type is whatever the caller declared up front.
        return self._data_type

    def run(self, table):
        column = table.columns[self._column_name]
        result = self._func(column)

        if not self._cast:
            return result

        return self._data_type.cast(result)
| mit | 460bd481cf69ebc88450bd82e4827cf3 | 28.222222 | 77 | 0.624525 | 3.984848 | false | false | false | false |
wireservice/agate | agate/table/group_by.py | 3 | 2067 | #!/usr/bin/env python
# pylint: disable=W0212
from collections import OrderedDict
from agate.data_types import Text
from agate.tableset import TableSet
def group_by(self, key, key_name=None, key_type=None):
    """
    Create a :class:`.TableSet` with a table for each unique key.

    Note that group names will always be coerced to a string, regardless of the
    format of the input column.

    :param key:
        Either the name of a column from the this table to group by, or a
        :class:`function` that takes a row and returns a value to group by.
    :param key_name:
        A name that describes the grouped properties. Defaults to the
        column name that was grouped on or "group" if grouping with a key
        function. See :class:`.TableSet` for more.
    :param key_type:
        An instance of any subclass of :class:`.DataType`. If not provided
        it will default to a :class`.Text`.
    :returns:
        A :class:`.TableSet` mapping where the keys are unique values from
        the :code:`key` and the values are new :class:`.Table` instances
        containing the grouped rows.
    """
    key_is_row_function = hasattr(key, '__call__')

    if key_is_row_function:
        key_name = key_name or 'group'
        key_type = key_type or Text()
    else:
        # Key is a column name: default the group name and type to that column's.
        column = self._columns[key]

        key_name = key_name or column.name
        key_type = key_type or column.data_type

    # OrderedDict preserves first-seen order of the group keys.
    groups = OrderedDict()

    for row in self._rows:
        if key_is_row_function:
            group_name = key(row)
        else:
            group_name = row[column.name]

        # Cast so group keys are consistent regardless of input format.
        group_name = key_type.cast(group_name)

        if group_name not in groups:
            groups[group_name] = []

        groups[group_name].append(row)

    if not groups:
        # Degenerate case: a table with no rows yields a TableSet containing
        # a single empty table and no keys.
        return TableSet([self._fork([])], [], key_name=key_name, key_type=key_type)

    output = OrderedDict()

    for group, rows in groups.items():
        output[group] = self._fork(rows)

    return TableSet(output.values(), output.keys(), key_name=key_name, key_type=key_type)
| mit | 3a701ea2f3d8c449c3ce50495283c41b | 30.318182 | 89 | 0.63135 | 3.813653 | false | false | false | false |
wireservice/agate | agate/data_types/number.py | 3 | 3479 | #!/usr/bin/env python
# -*- coding: utf8 -*-
try:
from cdecimal import Decimal, InvalidOperation
except ImportError: # pragma: no cover
from decimal import Decimal, InvalidOperation
import warnings
import six
from babel.core import Locale
from agate.data_types.base import DataType
from agate.exceptions import CastError
#: A list of currency symbols sourced from `Xe <http://www.xe.com/symbols.php>`_.
DEFAULT_CURRENCY_SYMBOLS = [u'؋', u'$', u'ƒ', u'៛', u'¥', u'₡', u'₱', u'£', u'€', u'¢', u'﷼', u'₪', u'₩', u'₭', u'₮',
                            u'₦', u'฿', u'₤', u'₫']

# Reusable sign multipliers applied to parsed magnitudes.
POSITIVE = Decimal('1')
NEGATIVE = Decimal('-1')


class Number(DataType):
    """
    Data representing numbers.

    :param locale:
        A locale specification such as :code:`en_US` or :code:`de_DE` to use
        for parsing formatted numbers.
    :param group_symbol:
        A grouping symbol used in the numbers. Overrides the value provided by
        the specified :code:`locale`.
    :param decimal_symbol:
        A decimal separate symbol used in the numbers. Overrides the value
        provided by the specified :code:`locale`.
    :param currency_symbols:
        A sequence of currency symbols to strip from numbers.
    """
    def __init__(self, locale='en_US', group_symbol=None, decimal_symbol=None,
                 currency_symbols=DEFAULT_CURRENCY_SYMBOLS, **kwargs):
        super(Number, self).__init__(**kwargs)

        self.locale = Locale.parse(locale)

        self.currency_symbols = currency_symbols

        # Suppress Babel warning on Python 3.6
        # See #665
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")

            # Explicit overrides win; otherwise use the locale's symbols,
            # falling back to "," and "." if the locale defines none.
            self.group_symbol = group_symbol or self.locale.number_symbols.get('group', ',')
            self.decimal_symbol = decimal_symbol or self.locale.number_symbols.get('decimal', '.')

    def cast(self, d):
        """
        Cast a single value to a :class:`decimal.Decimal`.

        :returns:
            :class:`decimal.Decimal` or :code:`None`.
        """
        if isinstance(d, Decimal) or d is None:
            return d

        t = type(d)

        if t is int:
            return Decimal(d)
        elif six.PY2 and t is long:  # noqa: F821
            return Decimal(d)
        elif t is float:
            # repr() yields the shortest round-trippable string for the float.
            return Decimal(repr(d))
        elif d is False:
            return Decimal(0)
        elif d is True:
            return Decimal(1)
        elif not isinstance(d, six.string_types):
            raise CastError('Can not parse value "%s" as Decimal.' % d)

        d = d.strip()

        if d.lower() in self.null_values:
            return None

        # Percent signs are stripped but the value is NOT rescaled.
        d = d.strip('%')

        # Record and remove a leading minus sign before stripping currency
        # symbols, so "-$5" parses correctly.
        if len(d) > 0 and d[0] == '-':
            d = d[1:]
            sign = NEGATIVE
        else:
            sign = POSITIVE

        for symbol in self.currency_symbols:
            d = d.strip(symbol)

        # Normalize locale-specific separators to plain Decimal syntax.
        d = d.replace(self.group_symbol, '')
        d = d.replace(self.decimal_symbol, '.')

        try:
            return Decimal(d) * sign
        # The Decimal class will return an InvalidOperation exception on most Python implementations,
        # but PyPy3 may return a ValueError if the string is not translatable to ASCII
        except (InvalidOperation, ValueError):
            pass

        raise CastError('Can not parse value "%s" as Decimal.' % d)

    def jsonify(self, d):
        # Decimal is not JSON-serializable; degrade to float (lossy).
        if d is None:
            return d

        return float(d)
| mit | 92176598103cf0424d4dcc4af106e0fa | 29.245614 | 117 | 0.576276 | 3.818383 | false | false | false | false |
cyanfish/heltour | heltour/local/ben_vbox_ubuntu.py | 1 | 1064 | from datetime import timedelta
# Local development overrides (paths reference /home/ben -- presumably a
# personal dev VM; adjust for your own environment).
DEBUG = True

# Filesystem paths to credential files for third-party integrations.
GOOGLE_SERVICE_ACCOUNT_KEYFILE_PATH = '/home/ben/gspread-creds.json'
SLACK_API_TOKEN_FILE_PATH = '/home/ben/slack-token'
SLACK_WEBHOOK_FILE_PATH = '/home/ben/slack-webhook'
FCM_API_KEY_FILE_PATH = '/home/ben/fcm-key'

# Alternative lichess-compatible domain used for local testing.
LICHESS_DOMAIN = 'https://listage.ovh/'
JAVAFO_COMMAND = 'java -jar /home/ben/javafo.jar'

LINK_PROTOCOL = 'http'

INTERNAL_IPS = ['127.0.0.1', '192.168.56.101', 'localhost']
ALLOWED_HOSTS = INTERNAL_IPS

# Print outgoing mail to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Periodic task schedule; currently only the scheduled-event runner is active.
CELERYBEAT_SCHEDULE = {
    # 'alternates_manager_tick': {
    #     'task': 'heltour.tournament.tasks.alternates_manager_tick',
    #     'schedule': timedelta(seconds=5),
    #     'args': ()
    # },
    # 'update_tv_state': {
    #     'task': 'heltour.tournament.tasks.update_tv_state',
    #     'schedule': timedelta(seconds=20),
    #     'args': ()
    # },
    'run_scheduled_events': {
        'task': 'heltour.tournament.tasks.run_scheduled_events',
        'schedule': timedelta(seconds=5),
        'args': ()
    },
}
| mit | fc6a4735a6ecda5634132de8b723047f | 31.242424 | 69 | 0.637218 | 2.931129 | false | false | true | false |
cyanfish/heltour | heltour/tournament/migrations/0080_auto_20160824_2233.py | 1 | 1531 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-24 22:33
import django.core.validators
from django.db import migrations, models
import heltour.tournament.models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds per-color win counters to
    TeamPairing, tiebreak fields (games_won, head_to_head, sb_score) to
    TeamScore, orders Player by username and restricts usernames to
    word characters and hyphens."""

    dependencies = [
        ('tournament', '0079_loneplayerscore_perf_rating'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='player',
            options={'ordering': ['lichess_username']},
        ),
        migrations.AddField(
            model_name='teampairing',
            name='black_wins',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='teampairing',
            name='white_wins',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='teamscore',
            name='games_won',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='teamscore',
            name='head_to_head',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='teamscore',
            name='sb_score',
            field=heltour.tournament.models.ScoreField(default=0),
        ),
        migrations.AlterField(
            model_name='player',
            name='lichess_username',
            field=models.CharField(max_length=255, validators=[django.core.validators.RegexValidator('^[\\w-]+$')]),
        ),
    ]
| mit | ff72e7368ed75ae8e8d15e4840783a80 | 29.019608 | 116 | 0.570869 | 4.349432 | false | false | false | false |
cyanfish/heltour | heltour/tournament/migrations/0189_auto_20210221_0424.py | 1 | 2032 | # Generated by Django 2.2.13 on 2021-02-21 04:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: constrains every board_number field
    to the choices 1 through 12."""

    dependencies = [
        ('tournament', '0188_auto_20191223_1457'),
    ]

    operations = [
        migrations.AlterField(
            model_name='alternate',
            name='board_number',
            field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12')]),
        ),
        migrations.AlterField(
            model_name='alternateassignment',
            name='board_number',
            field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12')]),
        ),
        migrations.AlterField(
            model_name='alternatebucket',
            name='board_number',
            field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12')]),
        ),
        migrations.AlterField(
            model_name='alternatesearch',
            name='board_number',
            field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12')]),
        ),
        migrations.AlterField(
            model_name='teammember',
            name='board_number',
            field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12')]),
        ),
        migrations.AlterField(
            model_name='teamplayerpairing',
            name='board_number',
            field=models.PositiveIntegerField(choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8'), (9, '9'), (10, '10'), (11, '11'), (12, '12')]),
        ),
    ]
| mit | fb1f2285986df8bc3879a1fca0aa5abf | 46.255814 | 182 | 0.448327 | 2.97511 | false | false | false | false |
cyanfish/heltour | heltour/api_worker/views.py | 1 | 3605 | import logging
import requests
import time
from . import worker
from django.core.cache import cache
from django.http.response import HttpResponse, JsonResponse
from django.utils.crypto import get_random_string
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
logger = logging.getLogger(__name__)
# Optimize for typical rate limits: 1, 2, 5, 10 minutes
# Maps retry attempt number -> seconds to sleep after a 429 response;
# attempts beyond the table fall back to 600 seconds (see _do_lichess_api_call).
retry_wait_times = {0: 60, 1: 60, 2: 180, 3: 300, 4: 600}
def _get_lichess_api_token():
    """Return the Lichess API token from the configured file, or None when unreadable."""
    token = None
    try:
        with open(settings.LICHESS_API_TOKEN_FILE_PATH) as token_file:
            token = token_file.read().strip()
    except IOError:
        token = None
    return token
def _do_lichess_api_call(redis_key, path, method, post_data, params, priority, max_retries, format,
                         retry_count=0):
    """Perform one lichess API request and store the outcome in the cache
    under ``redis_key``.

    On recoverable failures the call re-queues itself (up to ``max_retries``);
    rate-limited responses (429) trigger a backoff according to
    ``retry_wait_times``.
    """
    url = "https://lichess.org/%s" % path
    token = _get_lichess_api_token()
    logger.info('API call: %s' % url)
    try:
        headers = {}
        if token:
            headers['Authorization'] = 'Bearer %s' % token
        if format:
            headers['Accept'] = format
        if method == 'POST':
            r = requests.post(url, params=params, data=post_data, headers=headers)
        else:
            r = requests.get(url, params, headers=headers)
        # 4xx other than 429 won't succeed on retry -- cache the error and stop.
        if r.status_code >= 400 and r.status_code < 500 and r.status_code != 429:
            # Unrecoverable error
            logger.info('API Client Error[url:%s]: %s: %s', url, r.status_code, r.text)
            cache.set(redis_key, f'CLIENT-ERROR: [{r.status_code}] {r.text}', timeout=60)
            time.sleep(2)
            return
        if r.status_code == 200:
            # Success
            logger.info('API success')
            cache.set(redis_key, r.text, timeout=60)
            time.sleep(2)
            return
        logger.warning('API status code %s: %s' % (r.status_code, r.text))
    except Exception as e:
        logger.error('API unexpected error %s: %s' % (path, e))
        r = None
    # Failure
    if retry_count >= max_retries:
        logger.error('API exceeded maximum retries for %s' % path)
        # NOTE(review): an empty cached value appears to signal permanent
        # failure to whatever polls redis_key -- confirm against the consumer.
        cache.set(redis_key, '', timeout=60)
    else:
        # Retry
        logger.warning('API queuing retry for %s' % path)
        worker.queue_work(priority, _do_lichess_api_call, redis_key, path, method, post_data,
                          params, priority, max_retries, format, retry_count + 1)
    if r is not None and r.status_code == 429:
        # Too many requests
        wait_time = retry_wait_times.get(retry_count, 600)
        logger.warning(f'API 429, sleeping for {wait_time} seconds')
        time.sleep(wait_time)
    else:
        time.sleep(2)
@csrf_exempt
def lichess_api_call(request, path):
params = request.GET.dict()
priority = int(params.pop('priority', 0))
max_retries = int(params.pop('max_retries', 5))
format = params.pop('format', None)
redis_key = get_random_string(length=16)
# support either a form encoded body or a raw body
post_data = request.POST.dict()
if len(post_data) == 0:
post_data = request.body.decode('utf-8')
worker.queue_work(priority, _do_lichess_api_call, redis_key, path, request.method,
post_data, params, priority, max_retries, format)
return HttpResponse(redis_key)
@csrf_exempt
def watch(request):
game_ids = request.body.decode('utf-8').split(',')
result = worker.watch_games(game_ids)
return JsonResponse({'result': result})
@csrf_exempt
def watch_add(request):
game_id = request.body.decode('utf-8')
worker.add_watch(game_id)
return JsonResponse({'ok': True})
| mit | d9022d8e908abb5f3e3eb0c5f6bec654 | 32.073394 | 99 | 0.610818 | 3.5 | false | false | false | false |
cyanfish/heltour | heltour/tournament/migrations/0157_playerpresence.py | 1 | 1322 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-07-30 00:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournament', '0156_modrequest'),
]
operations = [
migrations.CreateModel(
name='PlayerPresence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('first_msg_time', models.DateTimeField(blank=True, null=True)),
('last_msg_time', models.DateTimeField(blank=True, null=True)),
('online_for_game', models.BooleanField(default=False)),
('pairing', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.PlayerPairing')),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player')),
('round', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Round')),
],
options={
'abstract': False,
},
),
]
| mit | bf3640e263f0f0ee3988c20ef0390130 | 39.060606 | 123 | 0.593041 | 4.13125 | false | false | false | false |
cyanfish/heltour | heltour/tournament/simulation.py | 1 | 2407 | import random
import math
from heltour.tournament.models import *
from heltour.tournament import pairinggen
sysrand = random.SystemRandom()
def simulate_round(round_):
forfeit_chance = 0.10
forfeit_results = ['1X-0F', '1/2Z-1/2Z', '0F-1X', '0F-0F']
def result_chances(rating_delta):
rating_delta_index = int(min(math.floor(abs(rating_delta) / 100.0), 5))
chances_by_rating_delta = [
(0.40, 0.15, 0.45),
(0.25, 0.10, 0.65),
(0.20, 0.10, 0.70),
(0.15, 0.10, 0.75),
(0.10, 0.10, 0.80),
(0.05, 0.00, 0.95),
]
chances = chances_by_rating_delta[rating_delta_index]
if rating_delta < 0:
chances = tuple(reversed(chances))
return chances
for p in round_.pairings.select_related('white', 'black'):
if sysrand.random() < forfeit_chance:
p.result = sysrand.choice(forfeit_results)
else:
chances = result_chances(p.white_rating_display() - p.black_rating_display())
r = sysrand.random()
if r < chances[0]:
p.result = '0-1'
elif r < chances[0] + chances[1]:
p.result = '1/2-1/2'
else:
p.result = '1-0'
p.save()
def simulate_season(season):
# Reset all season data
print('Clearing season data')
for r in season.round_set.order_by('-number'):
r.publish_pairings = False
r.is_completed = False
r.save()
LonePlayerPairing.objects.filter(round__season=season).delete()
TeamPlayerPairing.objects.filter(team_pairing__round__season=season).delete()
LonePlayerScore.objects.filter(season_player__season=season).delete()
TeamScore.objects.filter(team__season=season).delete()
latereg_players = {latereg.player_id for latereg in
PlayerLateRegistration.objects.filter(round__season=season)}
for sp in season.seasonplayer_set.all():
if sp.player_id in latereg_players:
sp.delete()
else:
sp.is_active = True
sp.save()
# Run each round
for r in season.round_set.order_by('number'):
print('Running round %d' % r.number)
pairinggen.generate_pairings(r)
r.publish_pairings = True
r.save()
simulate_round(r)
r.is_completed = True
r.save()
| mit | 0515c5d6ced8275e27dec93dcc807400 | 32.901408 | 89 | 0.57499 | 3.32 | false | false | false | false |
cyanfish/heltour | heltour/tournament/migrations/0061_auto_20160813_2319.py | 1 | 1046 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-13 23:19
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tournament', '0060_auto_20160813_2319'),
]
operations = [
migrations.RemoveField(
model_name='loneplayerpairing_old',
name='player_pairing',
),
migrations.RemoveField(
model_name='loneplayerpairing_old',
name='round',
),
migrations.AlterUniqueTogether(
name='teamplayerpairing_old',
unique_together=set([]),
),
migrations.RemoveField(
model_name='teamplayerpairing_old',
name='player_pairing',
),
migrations.RemoveField(
model_name='teamplayerpairing_old',
name='team_pairing',
),
migrations.DeleteModel(
name='LonePlayerPairing_old',
),
migrations.DeleteModel(
name='TeamPlayerPairing_old',
),
]
| mit | 7cccc7ecf762d44b192c7956c3db2cef | 24.512195 | 50 | 0.549713 | 4.150794 | false | false | false | false |
nanograv/enterprise | tests/test_white_signals.py | 2 | 19022 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_white_signals
----------------------------------
Tests for white signal modules.
"""
import unittest
import numpy as np
import scipy.linalg as sl
from enterprise.pulsar import Pulsar
from enterprise.signals import gp_signals, parameter, selections, utils, white_signals
from enterprise.signals.selections import Selection
from tests.enterprise_test_data import datadir
class Woodbury(object):
def __init__(self, N, U, J):
self.N = N
self.U = U
self.J = J
def solve(self, other):
if other.ndim == 1:
Nx = np.array(other / self.N)
elif other.ndim == 2:
Nx = np.array(other / self.N[:, None])
UNx = np.dot(self.U.T, Nx)
Sigma = np.diag(1 / self.J) + np.dot(self.U.T, self.U / self.N[:, None])
cf = sl.cho_factor(Sigma)
if UNx.ndim == 1:
tmp = np.dot(self.U, sl.cho_solve(cf, UNx)) / self.N
else:
tmp = np.dot(self.U, sl.cho_solve(cf, UNx)) / self.N[:, None]
return Nx - tmp
def logdet(self):
Sigma = np.diag(1 / self.J) + np.dot(self.U.T, self.U / self.N[:, None])
cf = sl.cho_factor(Sigma)
ld = np.sum(np.log(self.N)) + np.sum(np.log(self.J))
ld += np.sum(2 * np.log(np.diag(cf[0])))
return ld
class TestWhiteSignals(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup the Pulsar object."""
# initialize Pulsar class
cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim")
# IPTA-like pulsar
cls.ipsr = Pulsar(datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim")
def test_efac(self):
"""Test that efac signal returns correct covariance."""
# set up signal and parameters
efac = parameter.Uniform(0.1, 5)
ef = white_signals.MeasurementNoise(efac=efac)
efm = ef(self.psr)
# parameters
efac = 1.5
params = {"B1855+09_efac": efac}
# correct value
nvec0 = efac**2 * self.psr.toaerrs**2
# test
msg = "EFAC covariance incorrect."
assert np.all(efm.get_ndiag(params) == nvec0), msg
def test_efac_backend(self):
"""Test that backend-efac signal returns correct covariance."""
# set up signal and parameters
efac = parameter.Uniform(0.1, 5)
selection = Selection(selections.by_backend)
ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
efm = ef(self.psr)
# parameters
efacs = [1.3, 1.4, 1.5, 1.6]
params = {
"B1855+09_430_ASP_efac": efacs[0],
"B1855+09_430_PUPPI_efac": efacs[1],
"B1855+09_L-wide_ASP_efac": efacs[2],
"B1855+09_L-wide_PUPPI_efac": efacs[3],
}
# correct value
flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
nvec0 = np.zeros_like(self.psr.toas)
for ct, flag in enumerate(np.unique(flags)):
ind = flag == self.psr.backend_flags
nvec0[ind] = efacs[ct] ** 2 * self.psr.toaerrs[ind] ** 2
# test
msg = "EFAC covariance incorrect."
assert np.all(efm.get_ndiag(params) == nvec0), msg
def test_equad(self):
"""Test that the deprecated EquadNoise is not available."""
self.assertRaises(NotImplementedError, white_signals.EquadNoise)
def test_tnequad(self):
"""Test that tnequad signal returns correct covariance."""
# set up signal and parameters
tnequad = parameter.Uniform(-10, -5)
eq = white_signals.TNEquadNoise(log10_tnequad=tnequad)
eqm = eq(self.psr)
# parameters
tnequad = -6.4
params = {"B1855+09_log10_tnequad": tnequad}
# correct value
nvec0 = 10 ** (2 * tnequad) * np.ones_like(self.psr.toas)
# test
msg = "TNEQUAD covariance incorrect."
assert np.all(eqm.get_ndiag(params) == nvec0), msg
def test_tnequad_backend(self):
"""Test that backend-equad signal returns correct covariance."""
# set up signal and parameters
tnequad = parameter.Uniform(-10, -5)
selection = Selection(selections.by_backend)
eq = white_signals.TNEquadNoise(log10_tnequad=tnequad, selection=selection)
eqm = eq(self.psr)
# parameters
tnequads = [-6.1, -6.2, -6.3, -6.4]
params = {
"B1855+09_430_ASP_log10_tnequad": tnequads[0],
"B1855+09_430_PUPPI_log10_tnequad": tnequads[1],
"B1855+09_L-wide_ASP_log10_tnequad": tnequads[2],
"B1855+09_L-wide_PUPPI_log10_tnequad": tnequads[3],
}
# correct value
flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
nvec0 = np.zeros_like(self.psr.toas)
for ct, flag in enumerate(np.unique(flags)):
ind = flag == self.psr.backend_flags
nvec0[ind] = 10 ** (2 * tnequads[ct]) * np.ones(np.sum(ind))
# test
msg = "TNEQUAD covariance incorrect."
assert np.all(eqm.get_ndiag(params) == nvec0), msg
def test_add_efac_tnequad(self):
"""Test that addition of efac and tnequad signal returns
correct covariance.
"""
# set up signals
efac = parameter.Uniform(0.1, 5)
ef = white_signals.MeasurementNoise(efac=efac)
tnequad = parameter.Uniform(-10, -5)
eq = white_signals.TNEquadNoise(log10_tnequad=tnequad)
s = ef + eq
m = s(self.psr)
# set parameters
efac = 1.5
tnequad = -6.4
params = {"B1855+09_efac": efac, "B1855+09_log10_tnequad": tnequad}
# correct value
nvec0 = efac**2 * self.psr.toaerrs**2
nvec0 += 10 ** (2 * tnequad) * np.ones_like(self.psr.toas)
# test
msg = "EFAC/TNEQUAD covariance incorrect."
assert np.all(m.get_ndiag(params) == nvec0), msg
def test_add_efac_tnequad_backend(self):
"""Test that addition of efac-backend and tnequad-backend signal returns
correct covariance.
"""
selection = Selection(selections.by_backend)
efac = parameter.Uniform(0.1, 5)
tnequad = parameter.Uniform(-10, -5)
ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
eq = white_signals.TNEquadNoise(log10_tnequad=tnequad, selection=selection)
s = ef + eq
m = s(self.psr)
# set parameters
efacs = [1.3, 1.4, 1.5, 1.6]
tnequads = [-6.1, -6.2, -6.3, -6.4]
params = {
"B1855+09_430_ASP_efac": efacs[0],
"B1855+09_430_PUPPI_efac": efacs[1],
"B1855+09_L-wide_ASP_efac": efacs[2],
"B1855+09_L-wide_PUPPI_efac": efacs[3],
"B1855+09_430_ASP_log10_tnequad": tnequads[0],
"B1855+09_430_PUPPI_log10_tnequad": tnequads[1],
"B1855+09_L-wide_ASP_log10_tnequad": tnequads[2],
"B1855+09_L-wide_PUPPI_log10_tnequad": tnequads[3],
}
# correct value
flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
nvec0 = np.zeros_like(self.psr.toas)
for ct, flag in enumerate(np.unique(flags)):
ind = flag == self.psr.backend_flags
nvec0[ind] = efacs[ct] ** 2 * self.psr.toaerrs[ind] ** 2
nvec0[ind] += 10 ** (2 * tnequads[ct]) * np.ones(np.sum(ind))
logdet = np.sum(np.log(nvec0))
# test
msg = "EFAC/TNEQUAD covariance incorrect."
assert np.all(m.get_ndiag(params) == nvec0), msg
msg = "EFAC/TNEQUAD logdet incorrect."
N = m.get_ndiag(params)
assert np.allclose(N.solve(self.psr.residuals, logdet=True)[1], logdet, rtol=1e-10), msg
msg = "EFAC/TNEQUAD D1 solve incorrect."
assert np.allclose(N.solve(self.psr.residuals), self.psr.residuals / nvec0, rtol=1e-10), msg
msg = "EFAC/TNEQUAD 1D1 solve incorrect."
assert np.allclose(
N.solve(self.psr.residuals, left_array=self.psr.residuals),
np.dot(self.psr.residuals / nvec0, self.psr.residuals),
rtol=1e-10,
), msg
msg = "EFAC/TNEQUAD 2D1 solve incorrect."
T = self.psr.Mmat
assert np.allclose(
N.solve(self.psr.residuals, left_array=T), np.dot(T.T, self.psr.residuals / nvec0), rtol=1e-10
), msg
msg = "EFAC/TNEQUAD 2D2 solve incorrect."
assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, T / nvec0[:, None]), rtol=1e-10), msg
def test_efac_equad_combined_backend(self):
"""Test that the combined EFAC + EQUAD noise (tempo/tempo2/pint definition)
returns the correct covariance.
"""
selection = Selection(selections.by_backend)
efac = parameter.Uniform(0.1, 5)
t2equad = parameter.Uniform(-10, -5)
efq = white_signals.MeasurementNoise(efac=efac, log10_t2equad=t2equad, selection=selection)
m = efq(self.psr)
# set parameters
efacs = [1.3, 1.4, 1.5, 1.6]
equads = [-6.1, -6.2, -6.3, -6.4]
params = {
"B1855+09_430_ASP_efac": efacs[0],
"B1855+09_430_PUPPI_efac": efacs[1],
"B1855+09_L-wide_ASP_efac": efacs[2],
"B1855+09_L-wide_PUPPI_efac": efacs[3],
"B1855+09_430_ASP_log10_t2equad": equads[0],
"B1855+09_430_PUPPI_log10_t2equad": equads[1],
"B1855+09_L-wide_ASP_log10_t2equad": equads[2],
"B1855+09_L-wide_PUPPI_log10_t2equad": equads[3],
}
# correct value
flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
nvec0 = np.zeros_like(self.psr.toas)
for ct, flag in enumerate(np.unique(flags)):
ind = flag == self.psr.backend_flags
nvec0[ind] = efacs[ct] ** 2 * (self.psr.toaerrs[ind] ** 2 + 10 ** (2 * equads[ct]) * np.ones(np.sum(ind)))
logdet = np.sum(np.log(nvec0))
# test
msg = "EFAC+EQUAD covariance incorrect."
assert np.all(m.get_ndiag(params) == nvec0), msg
msg = "EFAC+EQUAD logdet incorrect."
N = m.get_ndiag(params)
assert np.allclose(N.solve(self.psr.residuals, logdet=True)[1], logdet, rtol=1e-10), msg
msg = "EFAC+EQUAD D1 solve incorrect."
assert np.allclose(N.solve(self.psr.residuals), self.psr.residuals / nvec0, rtol=1e-10), msg
msg = "EFAC+EQUAD 1D1 solve incorrect."
assert np.allclose(
N.solve(self.psr.residuals, left_array=self.psr.residuals),
np.dot(self.psr.residuals / nvec0, self.psr.residuals),
rtol=1e-10,
), msg
msg = "EFAC+EQUAD 2D1 solve incorrect."
T = self.psr.Mmat
assert np.allclose(
N.solve(self.psr.residuals, left_array=T), np.dot(T.T, self.psr.residuals / nvec0), rtol=1e-10
), msg
msg = "EFAC+EQUAD 2D2 solve incorrect."
assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, T / nvec0[:, None]), rtol=1e-10), msg
def _ecorr_test(self, method="sparse"):
"""Test of sparse/sherman-morrison ecorr signal and solve methods."""
selection = Selection(selections.by_backend)
efac = parameter.Uniform(0.1, 5)
ecorr = parameter.Uniform(-10, -5)
ef = white_signals.MeasurementNoise(efac=efac, selection=selection)
ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection, method=method)
tm = gp_signals.TimingModel()
s = ef + ec + tm
m = s(self.psr)
# set parameters
efacs = [1.3, 1.4, 1.5, 1.6]
ecorrs = [-6.1, -6.2, -6.3, -6.4]
params = {
"B1855+09_430_ASP_efac": efacs[0],
"B1855+09_430_PUPPI_efac": efacs[1],
"B1855+09_L-wide_ASP_efac": efacs[2],
"B1855+09_L-wide_PUPPI_efac": efacs[3],
"B1855+09_430_ASP_log10_ecorr": ecorrs[0],
"B1855+09_430_PUPPI_log10_ecorr": ecorrs[1],
"B1855+09_L-wide_ASP_log10_ecorr": ecorrs[2],
"B1855+09_L-wide_PUPPI_log10_ecorr": ecorrs[3],
}
# get EFAC Nvec
flags = ["430_ASP", "430_PUPPI", "L-wide_ASP", "L-wide_PUPPI"]
nvec0 = np.zeros_like(self.psr.toas)
for ct, flag in enumerate(np.unique(flags)):
ind = flag == self.psr.backend_flags
nvec0[ind] = efacs[ct] ** 2 * self.psr.toaerrs[ind] ** 2
# get the basis
bflags = self.psr.backend_flags
Umats = []
for flag in np.unique(bflags):
mask = bflags == flag
Umats.append(utils.create_quantization_matrix(self.psr.toas[mask], nmin=2)[0])
nepoch = sum(U.shape[1] for U in Umats)
U = np.zeros((len(self.psr.toas), nepoch))
jvec = np.zeros(nepoch)
netot = 0
for ct, flag in enumerate(np.unique(bflags)):
mask = bflags == flag
nn = Umats[ct].shape[1]
U[mask, netot : nn + netot] = Umats[ct]
jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ct])
netot += nn
# get covariance matrix
wd = Woodbury(nvec0, U, jvec)
# test
msg = "EFAC/ECORR {} logdet incorrect.".format(method)
N = m.get_ndiag(params)
assert np.allclose(N.solve(self.psr.residuals, logdet=True)[1], wd.logdet(), rtol=1e-10), msg
msg = "EFAC/ECORR {} D1 solve incorrect.".format(method)
assert np.allclose(N.solve(self.psr.residuals), wd.solve(self.psr.residuals), rtol=1e-10), msg
msg = "EFAC/ECORR {} 1D1 solve incorrect.".format(method)
assert np.allclose(
N.solve(self.psr.residuals, left_array=self.psr.residuals),
np.dot(self.psr.residuals, wd.solve(self.psr.residuals)),
rtol=1e-10,
), msg
msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method)
T = m.get_basis()
assert np.allclose(
N.solve(self.psr.residuals, left_array=T), np.dot(T.T, wd.solve(self.psr.residuals)), rtol=1e-10
), msg
msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method)
assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, wd.solve(T)), rtol=1e-10), msg
def _ecorr_test_ipta(self, method="sparse"):
"""Test of sparse/sherman-morrison ecorr signal and solve methods."""
selection = Selection(selections.nanograv_backends)
efac = parameter.Uniform(0.1, 5)
ecorr = parameter.Uniform(-10, -5)
ef = white_signals.MeasurementNoise(efac=efac)
ec = white_signals.EcorrKernelNoise(log10_ecorr=ecorr, selection=selection, method=method)
tm = gp_signals.TimingModel()
s = ef + ec + tm
m = s(self.ipsr)
# set parameters
efacs = [1.3]
ecorrs = [-6.1, -6.2, -6.3, -6.4, -7.2, -8.4, -7.1, -7.9]
params = {
"J1713+0747_efac": efacs[0],
"J1713+0747_ASP-L_log10_ecorr": ecorrs[0],
"J1713+0747_ASP-S_log10_ecorr": ecorrs[1],
"J1713+0747_GASP-8_log10_ecorr": ecorrs[2],
"J1713+0747_GASP-L_log10_ecorr": ecorrs[3],
"J1713+0747_GUPPI-8_log10_ecorr": ecorrs[4],
"J1713+0747_GUPPI-L_log10_ecorr": ecorrs[5],
"J1713+0747_PUPPI-L_log10_ecorr": ecorrs[6],
"J1713+0747_PUPPI-S_log10_ecorr": ecorrs[7],
}
# get EFAC Nvec
nvec0 = efacs[0] ** 2 * self.ipsr.toaerrs**2
# get the basis
flags = ["ASP-L", "ASP-S", "GASP-8", "GASP-L", "GUPPI-8", "GUPPI-L", "PUPPI-L", "PUPPI-S"]
bflags = self.ipsr.backend_flags
Umats = []
for flag in np.unique(bflags):
if flag in flags:
mask = bflags == flag
Umats.append(utils.create_quantization_matrix(self.ipsr.toas[mask], nmin=2)[0])
nepoch = sum(U.shape[1] for U in Umats)
U = np.zeros((len(self.ipsr.toas), nepoch))
jvec = np.zeros(nepoch)
netot, ct = 0, 0
for flag in np.unique(bflags):
if flag in flags:
mask = bflags == flag
nn = Umats[ct].shape[1]
U[mask, netot : nn + netot] = Umats[ct]
jvec[netot : nn + netot] = 10 ** (2 * ecorrs[ct])
netot += nn
ct += 1
# get covariance matrix
wd = Woodbury(nvec0, U, jvec)
# test
msg = "EFAC/ECORR {} logdet incorrect.".format(method)
N = m.get_ndiag(params)
assert np.allclose(N.solve(self.ipsr.residuals, logdet=True)[1], wd.logdet(), rtol=1e-8), msg
msg = "EFAC/ECORR {} D1 solve incorrect.".format(method)
assert np.allclose(N.solve(self.ipsr.residuals), wd.solve(self.ipsr.residuals), rtol=1e-8), msg
msg = "EFAC/ECORR {} 1D1 solve incorrect.".format(method)
assert np.allclose(
N.solve(self.ipsr.residuals, left_array=self.ipsr.residuals),
np.dot(self.ipsr.residuals, wd.solve(self.ipsr.residuals)),
rtol=1e-8,
), msg
msg = "EFAC/ECORR {} 2D1 solve incorrect.".format(method)
T = m.get_basis()
assert np.allclose(
N.solve(self.ipsr.residuals, left_array=T), np.dot(T.T, wd.solve(self.ipsr.residuals)), rtol=1e-8
), msg
msg = "EFAC/ECORR {} 2D2 solve incorrect.".format(method)
assert np.allclose(N.solve(T, left_array=T), np.dot(T.T, wd.solve(T)), rtol=1e-8), msg
def test_ecorr_sparse(self):
"""Test of sparse ecorr signal and solve methods."""
self._ecorr_test(method="sparse")
def test_ecorr_sherman_morrison(self):
"""Test of sherman-morrison ecorr signal and solve methods."""
self._ecorr_test(method="sherman-morrison")
def test_ecorr_block(self):
"""Test of block matrix ecorr signal and solve methods."""
self._ecorr_test(method="block")
def test_ecorr_sparse_ipta(self):
"""Test of sparse ecorr signal and solve methods."""
self._ecorr_test_ipta(method="sparse")
def test_ecorr_sherman_morrison_ipta(self):
"""Test of sherman-morrison ecorr signal and solve methods."""
self._ecorr_test_ipta(method="sherman-morrison")
def test_ecorr_block_ipta(self):
"""Test of block matrix ecorr signal and solve methods."""
self._ecorr_test_ipta(method="block")
class TestWhiteSignalsPint(TestWhiteSignals):
@classmethod
def setUpClass(cls):
"""Setup the Pulsar object."""
# initialize Pulsar class
cls.psr = Pulsar(
datadir + "/B1855+09_NANOGrav_9yv1.gls.par",
datadir + "/B1855+09_NANOGrav_9yv1.tim",
ephem="DE430",
timing_package="pint",
)
# IPTA-like pulsar
cls.ipsr = Pulsar(
datadir + "/1713.Sep.T2.par", datadir + "/1713.Sep.T2.tim", ephem="DE421", timint_package="pint"
)
| mit | 6d16f4844e2fe560659ce112b4f7c35a | 36.592885 | 118 | 0.569288 | 2.874717 | false | true | false | false |
nanograv/enterprise | enterprise/signals/selections.py | 2 | 3956 | # selections.py
"""Contains various selection functions to mask parameters by backend flags,
time-intervals, etc."""
import functools
import inspect
import numpy as np
def call_me_maybe(obj):
"""See `here`_ for description.
.. _here: https://www.youtube.com/watch?v=fWNaR-rxAic
"""
return obj() if hasattr(obj, "__call__") else obj
def selection_func(func):
try:
funcargs = inspect.getfullargspec(func).args
except:
funcargs = inspect.getargspec(func).args
@functools.wraps(func)
def wrapper(*args, **kwargs):
targs = list(args)
# check for mask
mask = kwargs.get("mask", Ellipsis)
if "mask" in kwargs:
del kwargs["mask"]
if len(targs) < len(funcargs) and "psr" in kwargs:
psr = kwargs["psr"]
for funcarg in funcargs[len(args) :]:
if funcarg not in kwargs and hasattr(psr, funcarg):
attr = call_me_maybe(getattr(psr, funcarg))
if isinstance(attr, np.ndarray) and getattr(mask, "shape", [0])[0] == len(attr):
targs.append(attr[mask])
else:
targs.append(attr)
if "psr" in kwargs and "psr" not in funcargs:
del kwargs["psr"]
return func(*targs, **kwargs)
return wrapper
def Selection(func):
"""Class factory for residual selection."""
# if we wished to pre-wrap standard selections below with the decorator @Selection,
# here we would make sure they are not wrapped twice
# if hasattr(func, 'masks'):
# return func
class Selection(object):
def __init__(self, psr):
self._psr = psr
@property
def masks(self):
return selection_func(func)(psr=self._psr)
def _get_masked_array_dict(self, masks, arr):
return {key: val * arr for key, val in masks.items()}
def __call__(self, parname, parameter, arr=None):
params, kmasks = {}, {}
for key, val in self.masks.items():
kname = "_".join([key, parname]) if key else parname
pname = "_".join([self._psr.name, kname])
params.update({kname: parameter(pname)})
kmasks.update({kname: val})
if arr is not None:
ma = self._get_masked_array_dict(kmasks, arr)
ret = (params, ma)
else:
ret = params, kmasks
return ret
return Selection
# SELECTION FUNCTIONS
def cut_half(toas):
"""Selection function to split by data segment"""
midpoint = (toas.max() + toas.min()) / 2
return dict(zip(["t1", "t2"], [toas <= midpoint, toas > midpoint]))
def by_band(flags):
"""Selection function to split by PPTA frequency band under -B flag"""
flagvals = np.unique(flags["B"])
return {val: flags["B"] == val for val in flagvals}
def by_frontend(flags):
"""Selection function to split by frontend under -fe flag"""
flagvals = np.unique(flags["fe"])
return {val: flags["fe"] == val for val in flagvals}
def by_backend(backend_flags):
"""Selection function to split by backend flags."""
flagvals = np.unique(backend_flags)
return {val: backend_flags == val for val in flagvals}
def nanograv_backends(backend_flags):
"""Selection function to split by NANOGRav backend flags only."""
flagvals = np.unique(backend_flags)
ngb = ["ASP", "GASP", "GUPPI", "PUPPI", "YUPPI"]
flagvals = [val for val in flagvals if any([b in val for b in ngb])]
return {val: backend_flags == val for val in flagvals}
def by_telescope(telescope):
"""Selection function to split by telescope"""
telescopes = np.unique(telescope)
return {t: (telescope == t) for t in telescopes}
def no_selection(toas):
"""Default selection with no splitting."""
return {"": np.ones_like(toas, dtype=bool)}
| mit | 2c046cb6462f7e15daff8c4880ff1ab6 | 28.522388 | 100 | 0.588473 | 3.70412 | false | false | false | false |
pytest-dev/pytest-bdd | tests/feature/test_alias.py | 1 | 1585 | """Test step alias when decorated multiple times."""
import textwrap
def test_step_alias(pytester):
pytester.makefile(
".feature",
alias=textwrap.dedent(
"""\
Feature: Step aliases
Scenario: Multiple step aliases
Given I have an empty list
And I have foo (which is 1) in my list
# Alias of the "I have foo (which is 1) in my list"
And I have bar (alias of foo) in my list
When I do crash (which is 2)
And I do boom (alias of crash)
Then my list should be [1, 1, 2, 2]
"""
),
)
pytester.makepyfile(
textwrap.dedent(
"""\
import pytest
from pytest_bdd import given, when, then, scenario
@scenario("alias.feature", "Multiple step aliases")
def test_alias():
pass
@given("I have an empty list", target_fixture="results")
def _():
return []
@given("I have foo (which is 1) in my list")
@given("I have bar (alias of foo) in my list")
def _(results):
results.append(1)
@when("I do crash (which is 2)")
@when("I do boom (alias of crash)")
def _(results):
results.append(2)
@then("my list should be [1, 1, 2, 2]")
def _(results):
assert results == [1, 1, 2, 2]
"""
)
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
| mit | 2e8808706ea7ef580cc08ee77977a885 | 25.416667 | 71 | 0.490852 | 4.342466 | false | true | false | false |
neuropoly/spinalcordtoolbox | testing/cli/test_cli.py | 1 | 1746 | import pytest
import pkg_resources
import importlib
scripts = pkg_resources.get_entry_map('spinalcordtoolbox')['console_scripts'].keys()
scripts_where_no_args_is_valid = [
'isct_test_ants', # No args -> tests ants binaries
'sct_check_dependencies', # No args -> checks dependencies
'sct_version' # No args -> prints version
]
scripts_to_test = [s for s in scripts if s not in scripts_where_no_args_is_valid]
scripts_without_callable_main = [
'isct_convert_binary_to_trilinear', # Uses 'getopt.getopt(sys.argv[1:])' instead of argparse
'isct_minc2volume-viewer', # Does parsing outside of main()
]
scripts_with_callable_main = [s for s in scripts_to_test if s not in scripts_without_callable_main]
@pytest.mark.parametrize("script", scripts_with_callable_main)
def test_scripts_with_no_args_as_main_func(capsys, script):
"""Test that [SCRIPTS_CALLABLE_WITH_MAIN] all return error code 2 and
show usage descriptions when called with no arguments."""
mod = importlib.import_module(f"spinalcordtoolbox.scripts.{script}")
with pytest.raises(SystemExit) as system_err:
mod.main(argv=[])
captured = capsys.readouterr()
assert system_err.value.code is 2
assert 'usage' in captured.err.lower()
@pytest.mark.script_launch_mode('subprocess')
@pytest.mark.parametrize("script", scripts_without_callable_main)
def test_scripts_with_no_args_as_subprocess(script, script_runner):
"""Test that [SCRIPTS_NOT_CALLABLE_WITH_MAIN] all return error code 2 and
show usage descriptions when called with no arguments."""
ret = script_runner.run(script)
assert ret.returncode is 2
assert 'usage' in ret.stdout.lower() or 'usage' in ret.stderr.lower()
| mit | c36eb6a92f4177aadcb58b7c2215da24 | 39.604651 | 99 | 0.709622 | 3.457426 | false | true | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/deepseg_sc/cnn_models_3d.py | 1 | 3867 | """Model.
Created: 2018-05
Last changes: 2018-05-23
Contributors: charley
"""
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.engine import Input, Model
from keras.layers import Conv3D, MaxPooling3D, UpSampling3D, Activation, BatchNormalization
from keras.optimizers import Adam
from keras.models import load_model
from keras.layers.merge import concatenate
# Note: `K.set_image_data_format("channels_first")` was removed from this file because it interfered
# with other tests. It may need to be re-added for this function to work properly. (See #2954)
def dice_coefficient(y_true, y_pred, smooth=1.):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coefficient_loss(y_true, y_pred):
return -dice_coefficient(y_true, y_pred)
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001,
depth=3, n_base_filters=16, metrics=dice_coefficient, batch_normalization=True):
inputs = Input(input_shape)
current_layer = inputs
levels = list()
for layer_depth in range(depth):
layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2**layer_depth),
batch_normalization=batch_normalization)
layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2**layer_depth) * 2,
batch_normalization=batch_normalization)
if layer_depth < depth - 1:
current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
levels.append([layer1, layer2, current_layer])
else:
current_layer = layer2
levels.append([layer1, layer2])
for layer_depth in range(depth - 2, -1, -1):
up_convolution = UpSampling3D(size=pool_size)
concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
input_layer=concat, batch_normalization=batch_normalization)
current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
input_layer=current_layer,
batch_normalization=batch_normalization)
final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
act = Activation('sigmoid')(final_convolution)
model = Model(inputs=inputs, outputs=act)
if not isinstance(metrics, list):
metrics = [metrics]
model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
return model
def create_convolution_block(input_layer, n_filters, batch_normalization=False,
kernel=(3, 3, 3), activation=None, padding='same',
strides=(1, 1, 1)):
layer = Conv3D(n_filters, kernel, padding=padding, strides=strides)(input_layer)
if batch_normalization:
layer = BatchNormalization(axis=1)(layer)
if activation is None:
return Activation('relu')(layer)
else:
return activation()(layer)
def compute_level_output_shape(n_filters, depth, pool_size, image_shape):
output_image_shape = np.asarray(np.divide(image_shape, np.power(pool_size, depth)), dtype=np.int32).tolist()
return tuple([None, n_filters] + output_image_shape)
def load_trained_model(model_file):
custom_objects = {'dice_coefficient_loss': dice_coefficient_loss, 'dice_coefficient': dice_coefficient}
return load_model(model_file, custom_objects=custom_objects)
| mit | c71657bca887f58a02eb88e07f102656 | 41.494505 | 113 | 0.650634 | 3.641243 | false | false | false | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/aggregate_slicewise.py | 1 | 25919 | #!/usr/bin/env python
# -*- coding: utf-8
# Functions dealing with metrics aggregation (mean, std, etc.) across slices and/or vertebral levels
# TODO: when mask is empty, raise specific message instead of throwing "Weight sum to zero..."
import os
import numpy as np
import math
import operator
import functools
import csv
import datetime
import logging
import wquantiles
from spinalcordtoolbox.template import get_slices_from_vertebral_levels, get_vertebral_level_from_slice
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.utils import __version__, parse_num_list_inv
class Metric:
    """
    Lightweight container pairing a data array with a human-readable label,
    intended to be stored as dictionary values.
    """
    def __init__(self, data=None, label=''):
        """
        :param data: ndarray: metric values (one per slice/level), or None
        :param label: str: name used to identify this metric in outputs
        """
        self.label = label
        self.data = data
class LabelStruc:
    """
    Descriptor for a single label: integer ID, display name, optional source
    filename and optional cluster assignment (see `func_map`).
    """
    def __init__(self, id, name, filename=None, map_cluster=None):
        """
        :param id: int: label identifier
        :param name: str: label name
        :param filename: str: file the label was loaded from, if any
        :param map_cluster: cluster this label belongs to, if any
        """
        self.map_cluster = map_cluster
        self.filename = filename
        self.name = name
        self.id = id
def func_bin(data, mask, map_clusters=None):
    """
    Binarize the input mask at 0.5, then return the average of data within it.
    :param data: nd-array: input data
    :param mask: (n+1)d-array: input mask
    :param map_clusters: not used
    :return: (average, None) as produced by func_wa
    """
    # Threshold the (possibly probabilistic) mask into {0, 1}
    binarized = np.where(mask >= 0.5, 1, 0)
    # Delegate to the weighted average with binary weights
    return func_wa(data, binarized)
def func_max(data, mask=None, map_clusters=None):
    """
    Return the maximum value of the input array.
    :param data: nd-array: input data
    :param mask: not used
    :param map_clusters: not used
    :return: (max value, None)
    """
    maximum = np.amax(data)
    return maximum, None
def func_map(data, mask, map_clusters):
    """
    Compute maximum a posteriori (MAP) by aggregating the last dimension of mask according to a clustering method
    defined by map_clusters
    :param data: nd-array: input data
    :param mask: (n+1)d-array: input mask. Note: this mask should include ALL labels to satisfy the necessary condition for\
    ML-based estimation, i.e., at each voxel, the sum of all labels (across the last dimension) equals the probability\
    to be inside the tissue. For example, for a pixel within the spinal cord, the sum of all labels should be 1.
    :param map_clusters: list of list of int: Each sublist corresponds to a cluster of labels where ML estimation will\
    be performed to provide the prior beta_0 for MAP estimation.
    :return: float: beta corresponding to the first label (beta[0])
    :return: nd-array: matrix of all beta
    """
    # Check number of labels and map_clusters
    assert mask.shape[-1] == len(map_clusters)
    # Iterate across all labels (excluding the first one) and generate cluster labels. Examples of input/output:
    # [[0], [0], [0], [1], [2], [0]] --> [0, 0, 0, 1, 2, 0]
    # [[0, 1], [0], [0], [1], [2]] --> [0, 0, 0, 0, 1]
    # [[0, 1], [0], [1], [2], [3]] --> [0, 0, 0, 1, 2]
    possible_clusters = [map_clusters[0]]
    id_clusters = [0]  # this one corresponds to the first cluster
    for i_cluster in map_clusters[1:]:  # skip the first
        found_index = False
        for possible_cluster in possible_clusters:
            # NOTE(review): no break after a match — if a label id appeared in more than one
            # existing cluster, several indices would be appended; presumably clusters are disjoint.
            if i_cluster[0] in possible_cluster:
                id_clusters.append(possible_clusters.index(possible_cluster))
                found_index = True
        if not found_index:
            possible_clusters.append(i_cluster)
            # NOTE(review): the lookup uses [i_cluster[0]] rather than i_cluster itself; for a
            # multi-element cluster just appended this would raise ValueError — confirm that
            # clusters reaching this branch are always single-element.
            id_clusters.append(possible_clusters.index([i_cluster[0]]))
    # Sum across each clustered labels, then concatenate to generate mask_clusters
    # mask_clusters has dimension: x, y, z, n_clustered_labels, with n_clustered_labels being equal to the number of
    # clusters that need to be estimated for ML method. Let's assume:
    # label_struc = [
    #   LabelStruc(id=0, map_cluster=0),
    #   LabelStruc(id=1, map_cluster=0),
    #   LabelStruc(id=2, map_cluster=0),
    #   LabelStruc(id=3, map_cluster=1),
    #   LabelStruc(id=4, map_cluster=2),
    #   ]
    #
    # Examples of scenario below for ML estimation:
    #   labels_id_user = [0,1], mask_clusters = [np.sum(label[0:2]), label[3], label[4]]
    #   labels_id_user = [3], mask_clusters = [np.sum(label[0:2]), label[3], label[4]]
    #   labels_id_user = [0,1,2,3], mask_clusters = [np.sum(label(0:3)), label[4]]
    mask_l = []
    for i_cluster in list(set(id_clusters)):
        # Get label indices for given cluster
        id_label_cluster = [i for i in range(len(id_clusters)) if i_cluster == id_clusters[i]]
        # Sum all labels for this cluster
        mask_l.append(np.expand_dims(np.sum(mask[..., id_label_cluster], axis=(mask.ndim - 1)), axis=(mask.ndim - 1)))
    mask_clusters = np.concatenate(mask_l, axis=(mask.ndim-1))
    # Run ML estimation for each clustered labels
    _, beta_cluster = func_ml(data, mask_clusters)
    # MAP estimation:
    #   y [nb_vox x 1]: measurements vector (to which weights are applied)
    #   x [nb_vox x nb_labels]: linear relation between the measurements y
    #   beta_0 [nb_labels]: A priori values estimated per cluster using ML.
    #   beta [nb_labels] = beta_0 + (Xt . X + 1)^(-1) . Xt . (y - X . beta_0): The estimated metric value in each label
    #
    # Note: for simplicity we consider that sigma_noise = sigma_label
    n_vox = functools.reduce(operator.mul, data.shape, 1)
    y = np.reshape(data, n_vox)
    x = np.reshape(mask, (n_vox, mask.shape[mask.ndim-1]))
    # The per-cluster ML estimate serves as the prior beta_0 for each individual label
    beta_0 = [beta_cluster[id_clusters[i_label]] for i_label in range(mask.shape[-1])]
    beta = beta_0 + np.dot(np.linalg.pinv(np.dot(x.T, x) + np.diag(np.ones(mask.shape[-1]))),
                           np.dot(x.T,
                                  (y - np.dot(x, beta_0))))
    return beta[0], beta
def func_median(data, mask, map_clusters=None):
    """
    Compute the weighted median of data. This is a "non-discrete" implementation of the
    median, in that it computes the mean between the middle discrete values.
    For more context, see: https://github.com/nudomarinero/wquantiles/issues/4
    :param data: nd-array: input data
    :param mask: (n+1)d-array: weights; if it carries an extra label dimension, the first label is used
    :param map_clusters: not used
    :return: (median, None)
    """
    # A mask with one extra dimension is a label stack: keep only the first label
    if mask.ndim == data.ndim + 1:
        mask = mask[..., 0]
    flat_data = data.reshape(-1)
    flat_weights = mask.reshape(-1)
    return wquantiles.median(flat_data, flat_weights), None
def func_ml(data, mask, map_clusters=None):
    """
    Compute the maximum-likelihood (ordinary least squares) estimate for the first label of mask.
    :param data: nd-array: input data
    :param mask: (n+1)d-array: input mask. Note: this mask should include ALL labels to satisfy the necessary condition\
    for ML-based estimation, i.e., at each voxel, the sum of all labels (across the last dimension) equals the\
    probability to be inside the tissue. For example, for a pixel within the spinal cord, the sum of all labels should\
    be 1.
    :return: (beta for the first label, full beta vector)
    """
    # TODO: support weighted least square
    # Flatten data to a 1d measurement vector and mask to a (n_vox, n_labels) design matrix.
    # ML estimation: beta = (Xt . X)^(-1) . Xt . y
    n_vox = data.size
    y = data.reshape(n_vox)
    x = mask.reshape(n_vox, mask.shape[-1])
    beta = np.linalg.pinv(x.T.dot(x)).dot(x.T).dot(y)
    return beta[0], beta
def func_std(data, mask=None, map_clusters=None):
    """
    Compute the weighted standard deviation of data, using mask as weights.
    :param data: nd-array: input data
    :param mask: (n+1)d-array: input mask; if it carries an extra label dimension, the first label is used
    :param map_clusters: not used
    :return: (std, None)
    """
    # A mask with one extra dimension is a label stack: keep only the first label
    if mask.ndim == data.ndim + 1:
        mask = mask[..., 0]
    # Weighted mean (the expand/collapse round-trip through the average helper in the
    # original is equivalent to a direct weighted average), then weighted variance
    mean = np.average(data, weights=mask)
    variance = np.average((data - mean) ** 2, weights=mask)
    return math.sqrt(variance), None
def func_sum(data, mask=None, map_clusters=None):
    """
    Compute the sum of all values in data.
    :param data: nd-array: input data
    :param mask: not used
    :param map_clusters: not used
    :return: (sum, None)
    """
    total = np.sum(data)
    return total, None
def func_wa(data, mask=None, map_clusters=None):
    """
    Compute the weighted average of data, using mask as weights.
    :param data: nd-array: input data
    :param mask: (n+1)d-array: input mask; if it carries an extra label dimension, the first label is used
    :param map_clusters: not used
    :return: (weighted average, None)
    """
    # A mask with one extra dimension is a label stack: keep only the first label
    weights = mask[..., 0] if mask.ndim == data.ndim + 1 else mask
    return np.average(data, weights=weights), None
def aggregate_per_slice_or_level(metric, mask=None, slices=[], levels=[], distance_pmj=None, perslice=None,
                                 perlevel=False, vert_level=None, group_funcs=(('MEAN', func_wa),), map_clusters=None):
    """
    The aggregation will be performed along the last dimension of 'metric' ndarray.

    :param metric: Class Metric(): data to aggregate.
    :param mask: Class Metric(): mask to use for aggregating the data. Optional.
    :param slices: List[int]: Slices to aggregate metric from. If empty, select all slices.
    :param levels: List[int]: Vertebral levels to aggregate metric from. It has priority over "slices".
    :param distance_pmj: float: Distance from Ponto-Medullary Junction (PMJ) in mm.
    :param Bool perslice: Aggregate per slice (True) or across slices (False)
    :param Bool perlevel: Aggregate per level (True) or across levels (False). Has priority over "perslice".
    :param vert_level: Vertebral level. Could be either an Image or a file name.
    :param tuple group_funcs: Name and function to apply on metric. Example: (('MEAN', func_wa),)). Note, the function
    has special requirements in terms of i/o. See the definition to func_wa and use it as a template.
    :param map_clusters: list of list of int: See func_map()
    :return: dict: Aggregated metric, keyed by tuples of slice numbers; each value is a dict with keys
        'VertLevel', 'DistancePMJ', 'Label'/'Size [vox]' (only when a mask is given) and one
        'FUNC(metric_label)' entry per function in group_funcs.
    """
    # NOTE(review): mutable default arguments (slices=[], levels=[]) — they are not mutated
    # here (slices is rebound below), but None defaults would be safer.
    if vert_level:
        # Assumption: vert_level image will only ever be 3D or 4D
        vert_level_slices = Image(vert_level).change_orientation('RPI').data.shape[2]
        # Get slices ('z') from metrics regardless of whether they're 1D [z], 3D [x, y, z], and 4D [x, y, z, t]
        metric_slices = metric.data.shape[2] if len(metric.data.shape) >= 3 else metric.data.shape[0]
        if vert_level_slices != metric_slices:
            raise ValueError(f"Shape mismatch between vertfile [{vert_level_slices}] and metric [{metric_slices}]). "
                             f"Please verify that your vertfile has the same number of slices as your input image, "
                             f"and that your metric is RPI/LPI oriented.")
    # If user neither specified slices nor levels, set perslice=True, otherwise, the output will likely contain nan
    # because in many cases the segmentation does not span the whole I-S dimension.
    if perslice is None:
        if not slices and not levels:
            perslice = True
        else:
            perslice = False
    # if slices is empty, select all available slices from the metric
    ndim = metric.data.ndim
    if not slices:
        slices = range(metric.data.shape[ndim-1])
    # aggregation based on levels
    vertgroups = None
    if levels:
        im_vert_level = Image(vert_level).change_orientation('RPI')
        # slicegroups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
        slicegroups = [tuple(get_slices_from_vertebral_levels(im_vert_level, level)) for level in levels]
        if perlevel:
            # vertgroups = [(2,), (3,), (4,)]
            vertgroups = [tuple([level]) for level in levels]
        elif perslice:
            # slicegroups = [(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)]
            slicegroups = [tuple([i]) for i in functools.reduce(operator.concat, slicegroups)]  # reduce to individual tuple
            # vertgroups = [(2,), (2,), (2,), (3,), (3,), (3,), (4,), (4,), (4,)]
            vertgroups = [tuple([get_vertebral_level_from_slice(im_vert_level, i[0])]) for i in slicegroups]
        # output aggregate metric across levels
        else:
            # slicegroups = [(0, 1, 2, 3, 4, 5, 6, 7, 8)]
            slicegroups = [tuple([val for sublist in slicegroups for val in sublist])]  # flatten into single tuple
            # vertgroups = [(2, 3, 4)]
            vertgroups = [tuple([level for level in levels])]
    # aggregation based on slices
    else:
        if perslice:
            # slicegroups = [(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)]
            slicegroups = [tuple([slice]) for slice in slices]
        else:
            # slicegroups = [(0, 1, 2, 3, 4, 5, 6, 7, 8)]
            slicegroups = [tuple(slices)]
    agg_metric = dict((slicegroup, dict()) for slicegroup in slicegroups)
    # loop across slice group
    for slicegroup in slicegroups:
        # add distance from PMJ info
        if distance_pmj is not None:
            agg_metric[slicegroup]['DistancePMJ'] = [distance_pmj]
        else:
            agg_metric[slicegroup]['DistancePMJ'] = None
        # add level info
        if vertgroups is None:
            agg_metric[slicegroup]['VertLevel'] = None
        else:
            agg_metric[slicegroup]['VertLevel'] = vertgroups[slicegroups.index(slicegroup)]
        # Loop across functions (e.g.: MEAN, STD)
        for (name, func) in group_funcs:
            try:
                data_slicegroup = metric.data[..., slicegroup]  # selection is done in the last dimension
                if mask is not None:
                    mask_slicegroup = mask.data[..., slicegroup, :]
                    agg_metric[slicegroup]['Label'] = mask.label
                    # For size calculation, only the first index [0] is relevant (See spinalcordtoolbox/issues/3216)
                    agg_metric[slicegroup]['Size [vox]'] = np.sum(mask_slicegroup[..., 0])
                else:
                    # No mask: weight every voxel equally
                    mask_slicegroup = np.ones(data_slicegroup.shape)
                # Ignore nonfinite values
                i_nonfinite = np.where(np.isfinite(data_slicegroup) == False)
                data_slicegroup[i_nonfinite] = 0.
                # TODO: the lines below could probably be done more elegantly
                if mask_slicegroup.ndim == data_slicegroup.ndim + 1:
                    # Zero out the nonfinite voxels in every label of the mask stack
                    arr_tmp_concat = []
                    for i in range(mask_slicegroup.shape[-1]):
                        arr_tmp = np.reshape(mask_slicegroup[..., i], data_slicegroup.shape)
                        arr_tmp[i_nonfinite] = 0.
                        arr_tmp_concat.append(np.expand_dims(arr_tmp, axis=(mask_slicegroup.ndim-1)))
                    mask_slicegroup = np.concatenate(arr_tmp_concat, axis=(mask_slicegroup.ndim-1))
                else:
                    mask_slicegroup[i_nonfinite] = 0.
                # Make sure the number of pixels to extract metrics is not null
                if mask_slicegroroup.sum() == 0 if False else mask_slicegroup.sum() == 0:
                    result = None
                else:
                    # Run estimation
                    result, _ = func(data_slicegroup, mask_slicegroup, map_clusters)
                    # check if nan
                    if np.isnan(result):
                        result = None
                # here we create a field with name: FUNC(METRIC_NAME). Example: MEAN(CSA)
                agg_metric[slicegroup]['{}({})'.format(name, metric.label)] = result
            except Exception as e:
                # NOTE(review): any failure is swallowed and the error message is stored as the
                # metric value (a string) — downstream consumers must handle non-numeric entries.
                logging.warning(e)
                agg_metric[slicegroup]['{}({})'.format(name, metric.label)] = str(e)
    return agg_metric
def check_labels(indiv_labels_ids, selected_labels):
    """
    Check the consistency of the labels asked by the user.

    :param indiv_labels_ids: list of available label IDs (ints, or strings convertible to int)
    :param selected_labels: list of label IDs selected by the user; falsy means "all"
    :return: list of int: IDs of the labels of interest (deduplicated, order preserved)
    """
    # convert strings to int
    list_ids_of_labels_of_interest = list(map(int, indiv_labels_ids))
    if selected_labels:
        # Remove redundant values while preserving the user's order
        list_ids_of_labels_of_interest = [i_label for n, i_label in enumerate(selected_labels)
                                          if i_label not in selected_labels[:n]]
        # Check that every selected label exists among the available label ids
        if not set(list_ids_of_labels_of_interest).issubset(set(indiv_labels_ids)):
            # Lazy %-formatting; the original message embedded source indentation via a
            # line continuation inside the string literal.
            logging.error(
                'At least one of the selected labels (%s) is not available according to the '
                'label list from the text file in the atlas folder.',
                list_ids_of_labels_of_interest)
    return list_ids_of_labels_of_interest
def diff_between_list_or_int(l1, l2):
    """
    Return the elements of l1 that do not appear in l2.

    Examples:\
    ([1, 2, 3], 1) --> [2, 3]\
    ([1, 2, 3], [1, 2]) --> [3]

    :param l1: list of int
    :param l2: list of int, or a single int
    :return: list of int
    """
    # Normalize a scalar into a one-element list so membership tests work uniformly
    excluded = [l2] if isinstance(l2, int) else l2
    return [item for item in l1 if item not in excluded]
def extract_metric(data, labels=None, slices=None, levels=None, perslice=True, perlevel=False,
                   vert_level=None, method=None, label_struc=None, id_label=None, indiv_labels_ids=None):
    """
    Extract metric within a data, using mask and a given method.

    :param data: Class Metric(): Data (a.k.a. metric) of n-dimension to extract aggregated value from
    :param labels: Class Metric(): Labels of (n+1)dim. The last dim encloses the labels.
    :param slices: passed through to aggregate_per_slice_or_level()
    :param levels: passed through to aggregate_per_slice_or_level()
    :param perslice: passed through to aggregate_per_slice_or_level()
    :param perlevel: passed through to aggregate_per_slice_or_level()
    :param vert_level: passed through to aggregate_per_slice_or_level()
    :param method: str: one of 'ml', 'map', 'wa', 'median', 'bin', 'max'
    :param label_struc: LabelStruc class defined above
    :param id_label: int: ID of label to select
    :param indiv_labels_ids: list of int: IDs of labels corresponding to individual (as opposed to combined) labels for\
    use with ML or MAP estimation.
    :return: aggregate_per_slice_or_level()
    """
    # Initializations
    map_clusters = None
    func_methods = {'ml': ('ML', func_ml), 'map': ('MAP', func_map)}  # TODO: complete dict with other methods
    # If label_struc[id_label].id is a list (i.e. comes from a combined labels), sum all labels
    if isinstance(label_struc[id_label].id, list):
        labels_sum = np.sum(labels[..., label_struc[id_label].id], axis=labels.ndim-1)  # (nx, ny, nz, 1)
    else:
        labels_sum = labels[..., label_struc[id_label].id]
    # expand dim: labels_sum=(..., 1)
    ndim = labels_sum.ndim
    labels_sum = np.expand_dims(labels_sum, axis=ndim)
    # Maximum Likelihood or Maximum a Posteriori
    if method in ['ml', 'map']:
        # Get the complementary list of labels (the ones not asked by the user)
        id_label_compl = diff_between_list_or_int(indiv_labels_ids, label_struc[id_label].id)
        # Generate a list of map_clusters for each label. Start with the first label (the one chosen by the user).
        # Note that the first label could be a combination of several labels (e.g., WM and GM).
        if isinstance(label_struc[id_label].id, list):
            # in case there are several labels for this id_label
            map_clusters = [list(set([label_struc[i].map_cluster for i in label_struc[id_label].id]))]
        else:
            # in case there is only one label for this id_label
            map_clusters = [[label_struc[id_label].map_cluster]]
        # Append the cluster for each remaining labels (i.e. the ones not included in the combined labels)
        for i_cluster in [label_struc[i].map_cluster for i in id_label_compl]:
            map_clusters.append([i_cluster])
        # Concatenate labels: first, the one asked by the user, then the remaining ones.
        # Examples of scenario:
        #   labels_sum = [[0], [1:36]]
        #   labels_sum = [[3,4], [0,2,5:36]]
        labels_sum = np.concatenate([labels_sum, labels[..., id_label_compl]], axis=ndim)
        mask = Metric(data=labels_sum, label=label_struc[id_label].name)
        group_funcs = (func_methods[method], ('STD', func_std))
    # Weighted average
    elif method == 'wa':
        mask = Metric(data=labels_sum, label=label_struc[id_label].name)
        group_funcs = (('WA', func_wa), ('STD', func_std))
    # Weighted median
    elif method == 'median':
        mask = Metric(data=labels_sum, label=label_struc[id_label].name)
        group_funcs = (('MEDIAN', func_median), ('STD', func_std))
    # Binarize mask
    elif method == 'bin':
        mask = Metric(data=labels_sum, label=label_struc[id_label].name)
        group_funcs = (('BIN', func_bin), ('STD', func_std))
    # Maximum
    elif method == 'max':
        mask = Metric(data=labels_sum, label=label_struc[id_label].name)
        group_funcs = (('MAX', func_max),)
    # NOTE(review): if `method` is not one of the cases above, `mask` and `group_funcs` are
    # never bound and the call below raises UnboundLocalError — confirm callers validate `method`.
    return aggregate_per_slice_or_level(data, mask=mask, slices=slices, levels=levels, perslice=perslice,
                                        perlevel=perlevel, vert_level=vert_level, group_funcs=group_funcs,
                                        map_clusters=map_clusters)
def make_a_string(item):
    """Convert a tuple, list or None to a string. Important: elements in tuples or lists
    are separated with ; (not ,) for compatibility with csv. Other values pass through."""
    if isinstance(item, (tuple, list)):
        return ';'.join(str(element) for element in item)
    if item is None:
        return 'None'
    return item
def merge_dict(dict_in):
    """
    Merge n dictionaries that are contained at the root key

    .. code-block:: python

        dict_in = {
            'area': {(0): {'Level': 0, 'Mean(area)': 0.5}, (1): {'Level': 1, 'Mean(area)': 0.2}}
            'angle_RL': {(0): {'Level': 0, 'Mean(angle_RL)': 15}, (1): {'Level': 1, 'Mean(angle_RL)': 12}}
            }

        dict_merged = {
            (0): {'Level': 0, 'Mean(area): 0.5, 'Mean(angle_RL): 15}
            (1): {'Level': 1, 'Mean(area): 0.2, 'Mean(angle_RL): 12}
            }

    :param dict_in: input dict. The sub-dicts of dict_in are NOT mutated.
    :return: normalized dict with sub-dicts at root level
    """
    dict_merged = {}
    # Iterating a dict yields its keys directly
    metrics = list(dict_in)
    if not metrics:
        # Empty input: nothing to merge (the previous implementation crashed here)
        return dict_merged
    # Fetch first parent key (metric), then loop across children keys (slicegroup)
    dict_first_metric = dict_in[metrics[0]]
    for slicegroup in dict_first_metric:
        # Copy (not alias) so the input's sub-dicts are not mutated by the updates below
        dict_merged[slicegroup] = dict(dict_first_metric[slicegroup])
        # Fold in every metric's entry for this slicegroup
        for metric in metrics:
            dict_merged[slicegroup].update(dict_in[metric][slicegroup])
    return dict_merged
def save_as_csv(agg_metric, fname_out, fname_in=None, append=False):
    """
    Write metric structure as csv. If field 'error' exists, it will add a specific column.

    :param agg_metric: output of aggregate_per_slice_or_level()
    :param fname_out: output filename. Extention (.csv) will be added if it does not exist.
    :param fname_in: input file to be listed in the csv file (e.g., segmentation file which produced the results).
    :param append: Bool: Append results at the end of file (if exists) instead of overwrite.
    :return:
    """
    # NOTE(review): despite the docstring, no '.csv' extension is actually appended to
    # fname_out anywhere in this function.
    # Item sorted in order for display in csv output
    # list_item = ['VertLevel', 'Label', 'MEAN', 'WA', 'BIN', 'ML', 'MAP', 'STD', 'MAX']
    # TODO: The thing below is ugly and needs to be fixed, but this is the only solution I found to order the columns
    # without refactoring the code with OrderedDict.
    list_item = ['Label', 'Size [vox]', 'MEAN(area)', 'STD(area)', 'MEAN(angle_AP)', 'STD(angle_AP)', 'MEAN(angle_RL)',
                 'STD(angle_RL)', 'MEAN(diameter_AP)', 'STD(diameter_AP)', 'MEAN(diameter_RL)', 'STD(diameter_RL)',
                 'MEAN(eccentricity)', 'STD(eccentricity)', 'MEAN(orientation)', 'STD(orientation)',
                 'MEAN(solidity)', 'STD(solidity)', 'SUM(length)', 'WA()', 'BIN()', 'ML()', 'MAP()', 'MEDIAN()',
                 'STD()', 'MAX()']
    # TODO: if append=True but file does not exist yet, raise warning and set append=False
    # write header (only if append=False)
    if not append or not os.path.isfile(fname_out):
        with open(fname_out, 'w') as csvfile:
            # spamwriter = csv.writer(csvfile, delimiter=',')
            header = ['Timestamp', 'SCT Version', 'Filename', 'Slice (I->S)', 'VertLevel', 'DistancePMJ']
            # Use the first slice-group entry as a template for which metric columns exist;
            # columns are selected by substring match against list_item to fix their order.
            agg_metric_key = [v for i, (k, v) in enumerate(agg_metric.items())][0]
            for item in list_item:
                for key in agg_metric_key:
                    if item in key:
                        header.append(key)
                        break
            writer = csv.DictWriter(csvfile, fieldnames=header)
            writer.writeheader()
    # populate data
    with open(fname_out, 'a') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',')
        for slicegroup in sorted(agg_metric.keys()):
            line = list()
            line.append(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))  # Timestamp
            line.append(__version__)  # SCT Version
            line.append(fname_in)  # file name associated with the results
            line.append(parse_num_list_inv(slicegroup))  # list all slices in slicegroup
            line.append(parse_num_list_inv(agg_metric[slicegroup]['VertLevel']))  # list vertebral levels
            line.append(parse_num_list_inv(agg_metric[slicegroup]['DistancePMJ']))  # list distance from PMJ
            # Same template/substring-matching scheme as the header, so values line up
            agg_metric_key = [v for i, (k, v) in enumerate(agg_metric.items())][0]
            for item in list_item:
                for key in agg_metric_key:
                    if item in key:
                        line.append(str(agg_metric[slicegroup][key]))
                        break
            spamwriter.writerow(line)
| mit | ce8aa869cf7d5a6452ef48dea840eec5 | 45.119217 | 133 | 0.611096 | 3.590884 | false | false | false | false |
neuropoly/spinalcordtoolbox | testing/api/test_deepseg_sc.py | 1 | 3912 | #!/usr/bin/env python
# -*- coding: utf-8
# pytest unit tests for spinalcordtoolbox.deepseg_sc
import pytest
import numpy as np
import nibabel as nib
from keras import backend as K
import spinalcordtoolbox as sct
from spinalcordtoolbox.image import Image
import spinalcordtoolbox.deepseg_sc.core
from spinalcordtoolbox.testing.create_test_data import dummy_centerline
from spinalcordtoolbox.utils import sct_test_path
# Parameter sets for test_deep_segmentation_spinalcord: each entry pairs a frozen
# ground-truth segmentation file with the contrast/kernel used to produce it.
param_deepseg = [
    ({'fname_seg_manual':
      sct_test_path('t2', 't2_seg-deepseg_sc-2d.nii.gz'),
      'contrast': 't2', 'kernel': '2d'}),
    ({'fname_seg_manual':
      sct_test_path('t2', 't2_seg-deepseg_sc-3d.nii.gz'),
      'contrast': 't2', 'kernel': '3d'}),
]
# noinspection 801,PyShadowingNames
@pytest.mark.parametrize('params', param_deepseg)
def test_deep_segmentation_spinalcord(params):
    """High level segmentation API: segment t2.nii.gz using a manually-provided
    centerline and compare voxel-for-voxel with a frozen ground-truth segmentation."""
    fname_im = sct_test_path('t2', 't2.nii.gz')
    fname_centerline_manual = sct_test_path('t2', 't2_centerline-manual.nii.gz')
    # Call segmentation function
    im_seg, _, _ = sct.deepseg_sc.core.deep_segmentation_spinalcord(
        Image(fname_im), params['contrast'], ctr_algo='file', ctr_file=fname_centerline_manual, brain_bool=False,
        kernel_size=params['kernel'], threshold_seg=0.5)
    # A thresholded segmentation is expected to come back as uint8
    assert im_seg.data.dtype == np.dtype('uint8')
    # Compare with ground-truth segmentation
    assert np.all(im_seg.data == Image(params['fname_seg_manual']).data)
def test_intensity_normalization():
    """scale_intensity should preserve the array shape and map values into [0, 255]."""
    data_in = np.random.rand(10, 10)
    min_out, max_out = 0, 255
    data_out = sct.deepseg_sc.core.scale_intensity(data_in, out_min=0, out_max=255)
    assert data_in.shape == data_out.shape
    assert np.min(data_out) >= min_out
    assert np.max(data_out) <= max_out
def test_crop_image_around_centerline():
    """crop_image_around_centerline should return a (crop, crop, nz) patch whose first
    slice matches a manual crop centered on the dummy centerline point at z=0."""
    input_shape = (100, 100, 100)
    crop_size = 20
    data = np.random.rand(input_shape[0], input_shape[1], input_shape[2])
    affine = np.eye(4)
    nii = nib.nifti1.Nifti1Image(data, affine)
    img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
    ctr, _, _ = dummy_centerline(size_arr=input_shape)
    _, _, _, img_out = sct.deepseg_sc.core.crop_image_around_centerline(
        im_in=img.copy(), ctr_in=ctr.copy(), crop_size=crop_size)
    # Manually reproduce the crop of slice z=0 around the centerline point
    img_in_z0 = img.data[:, :, 0]
    x_ctr_z0, y_ctr_z0 = np.where(ctr.data[:, :, 0])[0][0], np.where(ctr.data[:, :, 0])[1][0]
    x_start, x_end = sct.deepseg_sc.core._find_crop_start_end(x_ctr_z0, crop_size, img.dim[0])
    y_start, y_end = sct.deepseg_sc.core._find_crop_start_end(y_ctr_z0, crop_size, img.dim[1])
    img_in_z0_crop = img_in_z0[x_start:x_end, y_start:y_end]
    assert img_out.data.shape == (crop_size, crop_size, input_shape[2])
    assert np.allclose(img_in_z0_crop, img_out.data[:, :, 0])
def test_uncrop_image():
    """uncrop_image should paste each cropped slice back at its recorded (x, y) offset
    within the reference volume."""
    input_shape = (100, 100, 100)
    crop_size = 20
    data_crop = np.random.randint(0, 2, size=(crop_size, crop_size, input_shape[2]))
    data_in = np.random.randint(0, 1000, size=input_shape)
    # Random per-slice crop offsets: one (x, y) pair per z slice
    x_crop_lst = list(np.random.randint(0, input_shape[0] - crop_size, input_shape[2]))
    y_crop_lst = list(np.random.randint(0, input_shape[1] - crop_size, input_shape[2]))
    z_crop_lst = range(input_shape[2])
    affine = np.eye(4)
    nii = nib.nifti1.Nifti1Image(data_in, affine)
    img_in = Image(data_in, hdr=nii.header, dim=nii.header.get_data_shape())
    img_uncrop = sct.deepseg_sc.core.uncrop_image(
        ref_in=img_in, data_crop=data_crop, x_crop_lst=x_crop_lst, y_crop_lst=y_crop_lst, z_crop_lst=z_crop_lst)
    assert img_uncrop.data.shape == input_shape
    # Spot-check one random slice against the source crop
    z_rand = np.random.randint(0, input_shape[2])
    assert np.allclose(img_uncrop.data[x_crop_lst[z_rand]:x_crop_lst[z_rand] + crop_size,
                                       y_crop_lst[z_rand]:y_crop_lst[z_rand] + crop_size,
                                       z_rand],
                       data_crop[:, :, z_rand])
| mit | 0b5ed790574027d0fc0751a4b17a2536 | 38.12 | 113 | 0.643916 | 2.737579 | false | true | false | false |
darknessomi/musicbox | NEMbox/player.py | 1 | 19114 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: omi
# @Date: 2014-07-15 15:48:27
# @Last Modified by: AlanAlbert
# @Last Modified time: 2018-11-21 14:00:00
"""
网易云音乐 Player
"""
# Let's make some noise
import os
import random
import subprocess
import threading
import time
from . import logger
from .api import NetEase
from .cache import Cache
from .config import Config
from .kill_thread import stop_thread
from .storage import Storage
from .ui import Ui
from .utils import notify
log = logger.getLogger(__name__)
class Player(object):
    """Music player: manages the play list/state and drives mpg123 in
    remote-control mode through a subprocess."""

    # Playback modes; change_mode() cycles through them modulo 5
    MODE_ORDERED = 0
    MODE_ORDERED_LOOP = 1
    MODE_SINGLE_LOOP = 2
    MODE_RANDOM = 3
    MODE_RANDOM_LOOP = 4

    # Class-level registries shared by all instances
    SUBPROCESS_LIST = []
    MUSIC_THREADS = []
    def __init__(self):
        """Build the UI/config/storage/cache/API helpers and reset transient playback state."""
        self.config = Config()
        self.ui = Ui()
        # Handle of the running mpg123 subprocess (None when nothing is playing)
        self.popen_handler = None
        # flag stop, prevent thread start
        self.playing_flag = False
        # Set when expired song URLs have been refreshed; triggers a replay in run_mpg123
        self.refresh_url_flag = False
        # Playback progress (seconds elapsed / total), updated from mpg123 "@F" lines
        self.process_length = 0
        self.process_location = 0
        self.storage = Storage()
        self.cache = Cache()
        # Optional callbacks: end of play list / song changed
        self.end_callback = None
        self.playing_song_changed_callback = None
        self.api = NetEase()
        # Timestamp used by the UI to compute elapsed play time
        self.playinfo_starts = time.time()
    # --- Read-only views over the persisted player state (storage.database) ---

    @property
    def info(self):
        # Persistent player state: index, play list, order, mode, volume
        return self.storage.database["player_info"]

    @property
    def songs(self):
        # Mapping of song_id (str) -> song metadata dict
        return self.storage.database["songs"]

    @property
    def index(self):
        # Index of the current song within the play list
        return self.info["idx"]

    @property
    def list(self):
        # Ordered list of song ids (str) making up the play list
        return self.info["player_list"]

    @property
    def order(self):
        return self.info["playing_order"]

    @property
    def mode(self):
        # One of the MODE_* constants
        return self.info["playing_mode"]

    # --- Convenience predicates for the current playing mode ---

    @property
    def is_ordered_mode(self):
        return self.mode == Player.MODE_ORDERED

    @property
    def is_ordered_loop_mode(self):
        return self.mode == Player.MODE_ORDERED_LOOP

    @property
    def is_single_loop_mode(self):
        return self.mode == Player.MODE_SINGLE_LOOP

    @property
    def is_random_mode(self):
        return self.mode == Player.MODE_RANDOM

    @property
    def is_random_loop_mode(self):
        return self.mode == Player.MODE_RANDOM_LOOP

    # --- Configuration shortcuts ---

    @property
    def config_notifier(self):
        # Whether desktop notifications are enabled
        return self.config.get("notifier")

    @property
    def config_mpg123(self):
        # Extra command-line parameters passed to mpg123
        return self.config.get("mpg123_parameters")

    # --- Current song helpers ---

    @property
    def current_song(self):
        # Empty dict when the play list is empty or the index is out of range
        if not self.songs:
            return {}
        if not self.is_index_valid:
            return {}
        song_id = self.list[self.index]
        return self.songs.get(song_id, {})

    @property
    def playing_id(self):
        return self.current_song.get("song_id")

    @property
    def playing_name(self):
        return self.current_song.get("song_name")

    @property
    def is_empty(self):
        return len(self.list) == 0

    @property
    def is_index_valid(self):
        return 0 <= self.index < len(self.list)
    def notify_playing(self):
        """Send a desktop notification for the current song (no-op when the
        notifier is disabled or no song is selected)."""
        if not self.current_song:
            return
        if not self.config_notifier:
            return
        song = self.current_song
        notify(
            "正在播放: {}\n{}-{}".format(
                song["song_name"], song["artist"], song["album_name"]
            )
        )

    def notify_copyright_issue(self):
        """Log and notify that the current song cannot be played (copyright block)."""
        log.warning(
            "Song {} is unavailable due to copyright issue.".format(self.playing_id)
        )
        notify("版权限制,无法播放此歌曲")
    def change_mode(self, step=1):
        """Cycle the playing mode by `step` among the 5 MODE_* constants (wraps around)."""
        self.info["playing_mode"] = (self.info["playing_mode"] + step) % 5
    def build_playinfo(self):
        """Refresh the UI play-info line for the current song (no-op when none is selected)."""
        if not self.current_song:
            return
        self.ui.build_playinfo(
            self.current_song["song_name"],
            self.current_song["artist"],
            self.current_song["album_name"],
            self.current_song["quality"],
            self.playinfo_starts,
            pause=not self.playing_flag,
        )
def add_songs(self, songs):
for song in songs:
song_id = str(song["song_id"])
self.info["player_list"].append(song_id)
if song_id in self.songs:
self.songs[song_id].update(song)
else:
self.songs[song_id] = song
    def refresh_urls(self):
        """Re-fetch mp3 URLs for the whole play list (used when links expired) and
        mark refresh_url_flag so run_mpg123 restarts the current song."""
        songs = self.api.dig_info(self.list, "refresh_urls")
        if songs:
            for song in songs:
                song_id = str(song["song_id"])
                if song_id in self.songs:
                    # Only refresh URL-related fields; keep the rest of the metadata
                    self.songs[song_id]["mp3_url"] = song["mp3_url"]
                    self.songs[song_id]["expires"] = song["expires"]
                    self.songs[song_id]["get_time"] = song["get_time"]
                else:
                    self.songs[song_id] = song
            self.refresh_url_flag = True
    def stop(self):
        """Quit the running mpg123 subprocess (if any) and stop stale player threads."""
        if not hasattr(self.popen_handler, "poll") or self.popen_handler.poll():
            return
        self.playing_flag = False
        try:
            if not self.popen_handler.poll() and not self.popen_handler.stdin.closed:
                # "Q" asks mpg123 (remote-control mode) to quit cleanly before we kill it
                self.popen_handler.stdin.write(b"Q\n")
                self.popen_handler.stdin.flush()
                self.popen_handler.communicate()
                self.popen_handler.kill()
        except Exception as e:
            log.warn(e)
        finally:
            # NOTE(review): the last element of MUSIC_THREADS (presumably the current
            # playback thread) is deliberately excluded by the `- 1` — confirm intended.
            for thread_i in range(0, len(self.MUSIC_THREADS) - 1):
                if self.MUSIC_THREADS[thread_i].is_alive():
                    try:
                        stop_thread(self.MUSIC_THREADS[thread_i])
                    except Exception as e:
                        log.warn(e)
                        pass
    def tune_volume(self, up=0):
        """Adjust playback volume by `up` (may be negative) and push it to mpg123.

        :param up: int: volume delta; the result is floored at 0 (no upper cap —
            the cap at 100 is commented out below)
        """
        try:
            if self.popen_handler.poll():
                return
        except Exception as e:
            log.warn("Unable to tune volume: " + str(e))
            return
        new_volume = self.info["playing_volume"] + up
        # if new_volume > 100:
        #     new_volume = 100
        if new_volume < 0:
            new_volume = 0
        self.info["playing_volume"] = new_volume
        try:
            # "V <n>" sets the volume in mpg123 remote-control mode
            self.popen_handler.stdin.write(
                "V {}\n".format(self.info["playing_volume"]).encode()
            )
            self.popen_handler.stdin.flush()
        except Exception as e:
            log.warn(e)
    def switch(self):
        """Toggle play/pause by sending "P" to mpg123 and refresh the play-info bar."""
        if not self.popen_handler:
            return
        if self.popen_handler.poll():
            return
        self.playing_flag = not self.playing_flag
        if not self.popen_handler.stdin.closed:
            # "P" toggles pause in mpg123 remote-control mode
            self.popen_handler.stdin.write(b"P\n")
            self.popen_handler.stdin.flush()
        self.playinfo_starts = time.time()
        self.build_playinfo()
    def run_mpg123(self, on_exit, url, expires=-1, get_time=-1):
        """Drive mpg123 in remote-control mode ("-R"): load `url`, then parse the
        status lines it prints to track progress and decide what happens when
        playback ends (next song, stop, or URL refresh + replay).

        :param on_exit: callable; NOTE(review): never referenced in this body — confirm intended
        :param url: mp3 URL or local cache path; falsy means the song is unavailable
        :param expires: URL lifetime in seconds (-1 disables expiry checking)
        :param get_time: epoch time the URL was fetched (-1 disables expiry checking)
        """
        para = ["mpg123", "-R"] + self.config_mpg123
        self.popen_handler = subprocess.Popen(
            para, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        if not url:
            # No playable URL at all: treat as a copyright block
            self.notify_copyright_issue()
            if not self.is_single_loop_mode:
                self.next()
            else:
                self.stop()
            return
        self.tune_volume()
        try:
            # "L <url>" loads and starts playing the track
            self.popen_handler.stdin.write(b"L " + url.encode("utf-8") + b"\n")
            self.popen_handler.stdin.flush()
        except:
            # NOTE(review): bare except silently ignores failures writing to mpg123 stdin
            pass
        strout = " "
        copyright_issue_flag = False
        frame_cnt = 0
        while True:
            # Check the handler/stdin/stdout
            if not hasattr(self.popen_handler, "poll") or self.popen_handler.poll():
                break
            if self.popen_handler.stdout.closed:
                break
            # try to read the stdout of mpg123
            try:
                stroutlines = self.popen_handler.stdout.readline()
            except Exception as e:
                log.warn(e)
                break
            if not stroutlines:
                strout = " "
                break
            else:
                strout_new = stroutlines.decode().strip()
                if strout_new[:2] != strout[:2]:
                    # if status of mpg123 changed, stop all stale player threads
                    # (the last element — the current thread — is excluded)
                    for thread_i in range(0, len(self.MUSIC_THREADS) - 1):
                        if self.MUSIC_THREADS[thread_i].is_alive():
                            try:
                                stop_thread(self.MUSIC_THREADS[thread_i])
                            except Exception as e:
                                log.warn(e)
                strout = strout_new
            # Update application status according to mpg123 output
            if strout[:2] == "@F":
                # playing, update progress (fields: frame counts, then seconds played / left)
                out = strout.split(" ")
                frame_cnt += 1
                self.process_location = float(out[3])
                self.process_length = int(float(out[3]) + float(out[4]))
            elif strout[:2] == "@E":
                # mpg123 reported an error
                self.playing_flag = True
                if (
                    expires >= 0
                    and get_time >= 0
                    and time.time() - expires - get_time >= 0
                ):
                    # URL expired: refresh URLs and set self.refresh_url_flag = True
                    self.refresh_urls()
                else:
                    # copyright issue raised, next if not single loop
                    copyright_issue_flag = True
                    self.notify_copyright_issue()
                break
            elif strout == "@P 0" and frame_cnt:
                # normally end, moving to next
                self.playing_flag = True
                copyright_issue_flag = False
                break
            elif strout == "@P 0":
                # stopped before any frame played: copyright issue raised, next if not single loop
                self.playing_flag = True
                copyright_issue_flag = True
                self.notify_copyright_issue()
                break
        # Ideal behavior:
        # if refresh_url_flag are set, then replay.
        # if not, do action like following:
        # [self.playing_flag, copyright_issue_flag, self.is_single_loop_mode]: function()
        # [0, 0, 0]: self.stop()
        # [0, 0, 1]: self.stop()
        # [0, 1, 0]: self.stop()
        # [0, 1, 1]: self.stop()
        # [1, 0, 0]: self.next()
        # [1, 0, 1]: self.next()
        # [1, 1, 0]: self.next()
        # [1, 1, 1]: self.stop()
        # Do corresponding action according to status
        if self.playing_flag and self.refresh_url_flag:
            self.stop()  # Will set self.playing_flag = False
            # So set the playing_flag here to be True is necessary
            # to keep the play/pause status right
            self.playing_flag = True
            self.start_playing(lambda: 0, self.current_song)
            self.refresh_url_flag = False
        else:
            # When no replay are needed
            if not self.playing_flag:
                self.stop()
            elif copyright_issue_flag and self.is_single_loop_mode:
                self.stop()
            else:
                self.next()
def download_lyric(self, is_transalted=False):
key = "lyric" if not is_transalted else "tlyric"
if key not in self.songs[str(self.playing_id)]:
self.songs[str(self.playing_id)][key] = []
if len(self.songs[str(self.playing_id)][key]) > 0:
return
if not is_transalted:
lyric = self.api.song_lyric(self.playing_id)
else:
lyric = self.api.song_tlyric(self.playing_id)
self.songs[str(self.playing_id)][key] = lyric
def download_song(self, song_id, song_name, artist, url):
def write_path(song_id, path):
self.songs[str(song_id)]["cache"] = path
self.cache.add(song_id, song_name, artist, url, write_path)
self.cache.start_download()
    def start_playing(self, on_exit, args):
        """
        Runs the given args in subprocess.Popen, and then calls the function
        on_exit when the subprocess completes.
        on_exit is a callable object, and args is a lists/tuple of args
        that would give to subprocess.Popen.
        """
        # print(args.get('cache'))
        # prefer the locally cached file when it exists, otherwise stream
        # from the song URL together with its expiry information
        if "cache" in args.keys() and os.path.isfile(args["cache"]):
            thread = threading.Thread(
                target=self.run_mpg123, args=(on_exit, args["cache"])
            )
        else:
            thread = threading.Thread(
                target=self.run_mpg123,
                args=(on_exit, args["mp3_url"], args["expires"], args["get_time"]),
            )
        # download the song into the cache in parallel with playback
        cache_thread = threading.Thread(
            target=self.download_song,
            args=(
                args["song_id"],
                args["song_name"],
                args["artist"],
                args["mp3_url"],
            ),
        )
        cache_thread.start()
        thread.start()
        self.MUSIC_THREADS.append(thread)
        # drop references to playback threads that already finished
        self.MUSIC_THREADS = [i for i in self.MUSIC_THREADS if i.is_alive()]
        # fetch the plain and the translated lyric in the background
        lyric_download_thread = threading.Thread(target=self.download_lyric)
        lyric_download_thread.start()
        tlyric_download_thread = threading.Thread(
            target=self.download_lyric, args=(True,)
        )
        tlyric_download_thread.start()
        # returns immediately after the thread starts
        return thread
    def replay(self):
        """(Re)start playback of the currently selected song.

        Stops (and fires the end callback) when the playing index is
        invalid; does nothing when there is no current song.
        """
        if not self.is_index_valid:
            self.stop()
            if self.end_callback:
                log.debug("Callback")
                self.end_callback()
            return
        if not self.current_song:
            return
        self.playing_flag = True
        self.playinfo_starts = time.time()
        self.build_playinfo()
        self.notify_playing()
        self.start_playing(lambda: 0, self.current_song)
def shuffle_order(self):
del self.order[:]
self.order.extend(list(range(0, len(self.list))))
random.shuffle(self.order)
self.info["random_index"] = 0
def new_player_list(self, type, title, datalist, offset):
self.info["player_list_type"] = type
self.info["player_list_title"] = title
# self.info['idx'] = offset
self.info["player_list"] = []
self.info["playing_order"] = []
self.info["random_index"] = 0
self.add_songs(datalist)
    def append_songs(self, datalist):
        """Append *datalist* to the current playlist (delegates to add_songs)."""
        self.add_songs(datalist)
    # switch_flag being true means:
    # we are inside the play list, OR the current list type is not one of
    # "songs", "djprograms", "fmsongs"
def play_or_pause(self, idx, switch_flag):
if self.is_empty:
return
# if same "list index" and "playing index" --> same song :: pause/resume it
if self.index == idx and switch_flag:
if not self.popen_handler:
self.replay()
else:
self.switch()
else:
self.info["idx"] = idx
self.stop()
self.replay()
def _swap_song(self):
now_songs = self.order.index(self.index)
self.order[0], self.order[now_songs] = self.order[now_songs], self.order[0]
def _need_to_shuffle(self):
playing_order = self.order
random_index = self.info["random_index"]
if (
random_index >= len(playing_order)
or playing_order[random_index] != self.index
):
return True
else:
return False
    def next_idx(self):
        """Advance ``self.info["idx"]`` to the next song according to the
        current playing mode (ordered, ordered-loop, single-loop, random,
        random-loop) and fire the song-changed callback.
        """
        if not self.is_index_valid:
            return self.stop()
        playlist_len = len(self.list)
        if self.mode == Player.MODE_ORDERED:
            # make sure self.index will not over
            if self.info["idx"] < playlist_len:
                self.info["idx"] += 1
        elif self.mode == Player.MODE_ORDERED_LOOP:
            self.info["idx"] = (self.index + 1) % playlist_len
        elif self.mode == Player.MODE_SINGLE_LOOP:
            # single loop: keep playing the same index
            self.info["idx"] = self.info["idx"]
        else:
            # random modes walk the shuffled self.order forwards
            playing_order_len = len(self.order)
            if self._need_to_shuffle():
                self.shuffle_order()
                # When you regenerate playing list
                # you should keep previous song same.
                self._swap_song()
                playing_order_len = len(self.order)
            self.info["random_index"] += 1
            # Out of border
            if self.mode == Player.MODE_RANDOM_LOOP:
                self.info["random_index"] %= playing_order_len
            # Random but not loop, out of border, stop playing.
            if self.info["random_index"] >= playing_order_len:
                self.info["idx"] = playlist_len
            else:
                self.info["idx"] = self.order[self.info["random_index"]]
        if self.playing_song_changed_callback is not None:
            self.playing_song_changed_callback()
    def next(self):
        """Stop the current song and start playing the following one."""
        self.stop()
        self.next_idx()
        self.replay()
    def prev_idx(self):
        """Move ``self.info["idx"]`` to the previous song according to the
        current playing mode and fire the song-changed callback.
        """
        if not self.is_index_valid:
            self.stop()
            return
        playlist_len = len(self.list)
        if self.mode == Player.MODE_ORDERED:
            if self.info["idx"] > 0:
                self.info["idx"] -= 1
        elif self.mode == Player.MODE_ORDERED_LOOP:
            self.info["idx"] = (self.info["idx"] - 1) % playlist_len
        elif self.mode == Player.MODE_SINGLE_LOOP:
            # single loop: keep playing the same index
            self.info["idx"] = self.info["idx"]
        else:
            # random modes walk the shuffled self.order backwards
            playing_order_len = len(self.order)
            if self._need_to_shuffle():
                self.shuffle_order()
                playing_order_len = len(self.order)
            self.info["random_index"] -= 1
            if self.info["random_index"] < 0:
                if self.mode == Player.MODE_RANDOM:
                    # plain random: clamp at the beginning
                    self.info["random_index"] = 0
                else:
                    # random loop: wrap around to the end
                    self.info["random_index"] %= playing_order_len
            self.info["idx"] = self.order[self.info["random_index"]]
        if self.playing_song_changed_callback is not None:
            self.playing_song_changed_callback()
    def prev(self):
        """Stop the current song and start playing the previous one."""
        self.stop()
        self.prev_idx()
        self.replay()
    def shuffle(self):
        """Switch to random mode, regenerate the order and restart playback."""
        self.stop()
        self.info["playing_mode"] = Player.MODE_RANDOM
        self.shuffle_order()
        self.info["idx"] = self.info["playing_order"][self.info["random_index"]]
        self.replay()
    def volume_up(self):
        """Raise the playback volume by 5 units."""
        self.tune_volume(5)
    def volume_down(self):
        """Lower the playback volume by 5 units."""
        self.tune_volume(-5)
    def update_size(self):
        """Propagate a terminal resize to the UI and redraw the play info."""
        self.ui.update_size()
        self.build_playinfo()
    def cache_song(self, song_id, song_name, artist, song_url):
        """Download *song_id* into the local cache and remember its path.

        NOTE(review): unlike ``download_song``, this enables the cache for
        the duration of a single download and disables it again in the
        completion callback — presumably caching is opt-in per call here;
        confirm this asymmetry is intended.
        """
        def on_exit(song_id, path):
            # remember the cached file, then switch the cache back off
            self.songs[str(song_id)]["cache"] = path
            self.cache.enable = False
        self.cache.enable = True
        self.cache.add(song_id, song_name, artist, song_url, on_exit)
        self.cache.start_download()
| mit | a4149507d41f75797ee7b4f95d3b0479 | 30.643927 | 91 | 0.52587 | 3.856824 | false | false | false | false |
darknessomi/musicbox | NEMbox/const.py | 1 | 1349 | # encoding: UTF-8
# KenHuang: make the configuration directories follow the XDG Base Directory spec
import os
class Constant(object):
    """Filesystem locations used by musicbox.

    Follows the XDG Base Directory specification when the corresponding
    environment variables are set, and falls back to a single
    ``~/.netease-musicbox`` directory otherwise.
    """

    # configuration
    if "XDG_CONFIG_HOME" in os.environ:
        conf_dir = os.path.join(os.environ["XDG_CONFIG_HOME"], "netease-musicbox")
    else:
        conf_dir = os.path.join(os.path.expanduser("~"), ".netease-musicbox")
    config_path = os.path.join(conf_dir, "config.json")

    # cache (downloaded songs and the request cache)
    if "XDG_CACHE_HOME" in os.environ:
        cacheDir = os.path.join(os.environ["XDG_CACHE_HOME"], "netease-musicbox")
        # makedirs(exist_ok=True) also creates missing parent directories
        # and avoids the exists()/mkdir() race of the previous code
        os.makedirs(cacheDir, exist_ok=True)
        download_dir = os.path.join(cacheDir, "cached")
        cache_path = os.path.join(cacheDir, "nemcache")
    else:
        download_dir = os.path.join(conf_dir, "cached")
        cache_path = os.path.join(conf_dir, "nemcache")

    # data (cookies, logs, persistent storage)
    if "XDG_DATA_HOME" in os.environ:
        dataDir = os.path.join(os.environ["XDG_DATA_HOME"], "netease-musicbox")
        os.makedirs(dataDir, exist_ok=True)
        cookie_path = os.path.join(dataDir, "cookie.txt")
        log_path = os.path.join(dataDir, "musicbox.log")
        storage_path = os.path.join(dataDir, "database.json")
    else:
        cookie_path = os.path.join(conf_dir, "cookie.txt")
        log_path = os.path.join(conf_dir, "musicbox.log")
        storage_path = os.path.join(conf_dir, "database.json")
| mit | 5f028c946f762b78b800b5f57c7e95c2 | 41.870968 | 82 | 0.62453 | 2.876623 | false | true | false | false |
bethgelab/foolbox | foolbox/attacks/carlini_wagner.py | 1 | 8195 | from typing import Union, Tuple, Any, Optional
from functools import partial
import numpy as np
import eagerpy as ep
from ..devutils import flatten
from ..devutils import atleast_kd
from ..types import Bounds
from ..models import Model
from ..distances import l2
from ..criteria import Misclassification
from ..criteria import TargetedMisclassification
from .base import MinimizationAttack
from .base import T
from .base import get_criterion
from .base import raise_if_kwargs
from .base import verify_input_bounds
from .gradient_descent_base import AdamOptimizer
class L2CarliniWagnerAttack(MinimizationAttack):
    """Implementation of the Carlini & Wagner L2 Attack. [#Carl16]_

    Args:
        binary_search_steps : Number of steps to perform in the binary search
            over the const c.
        steps : Number of optimization steps within each binary search step.
        stepsize : Stepsize to update the examples.
        confidence : Confidence required for an example to be marked as adversarial.
            Controls the gap between example and decision boundary.
        initial_const : Initial value of the const c with which the binary search starts.
        abort_early : Stop inner search as soon as an adversarial example has been found.
            Does not affect the binary search over the const c.

    References:
        .. [#Carl16] Nicholas Carlini, David Wagner, "Towards evaluating the robustness of
            neural networks. In 2017 ieee symposium on security and privacy"
            https://arxiv.org/abs/1608.04644
    """

    distance = l2

    def __init__(
        self,
        binary_search_steps: int = 9,
        steps: int = 10000,
        stepsize: float = 1e-2,
        confidence: float = 0,
        initial_const: float = 1e-3,
        abort_early: bool = True,
    ):
        self.binary_search_steps = binary_search_steps
        self.steps = steps
        self.stepsize = stepsize
        self.confidence = confidence
        self.initial_const = initial_const
        self.abort_early = abort_early

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, TargetedMisclassification, T],
        *,
        early_stop: Optional[float] = None,
        **kwargs: Any,
    ) -> T:
        """Returns the best (smallest-L2) adversarial found per sample."""
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x, model)

        N = len(x)

        # the confidence margin is added to the logits with opposite sign
        # for targeted vs. untargeted attacks
        if isinstance(criterion_, Misclassification):
            targeted = False
            classes = criterion_.labels
            change_classes_logits = self.confidence
        elif isinstance(criterion_, TargetedMisclassification):
            targeted = True
            classes = criterion_.target_classes
            change_classes_logits = -self.confidence
        else:
            raise ValueError("unsupported criterion")

        def is_adversarial(perturbed: ep.Tensor, logits: ep.Tensor) -> ep.Tensor:
            if change_classes_logits != 0:
                logits += ep.onehot_like(logits, classes, value=change_classes_logits)
            return criterion_(perturbed, logits)

        if classes.shape != (N,):
            name = "target_classes" if targeted else "labels"
            raise ValueError(
                f"expected {name} to have shape ({N},), got {classes.shape}"
            )

        bounds = model.bounds
        to_attack_space = partial(_to_attack_space, bounds=bounds)
        to_model_space = partial(_to_model_space, bounds=bounds)

        # optimize in tanh-space so the box constraints are satisfied
        # by construction
        x_attack = to_attack_space(x)
        reconstsructed_x = to_model_space(x_attack)

        rows = range(N)

        def loss_fun(
            delta: ep.Tensor, consts: ep.Tensor
        ) -> Tuple[ep.Tensor, Tuple[ep.Tensor, ep.Tensor]]:
            assert delta.shape == x_attack.shape
            assert consts.shape == (N,)

            x = to_model_space(x_attack + delta)
            logits = model(x)

            if targeted:
                c_minimize = best_other_classes(logits, classes)
                c_maximize = classes  # target_classes
            else:
                c_minimize = classes  # labels
                c_maximize = best_other_classes(logits, classes)

            is_adv_loss = logits[rows, c_minimize] - logits[rows, c_maximize]
            assert is_adv_loss.shape == (N,)

            is_adv_loss = is_adv_loss + self.confidence
            is_adv_loss = ep.maximum(0, is_adv_loss)
            is_adv_loss = is_adv_loss * consts

            # trade off adversarialness against L2 distance to the original
            squared_norms = flatten(x - reconstsructed_x).square().sum(axis=-1)
            loss = is_adv_loss.sum() + squared_norms.sum()
            return loss, (x, logits)

        loss_aux_and_grad = ep.value_and_grad_fn(x, loss_fun, has_aux=True)

        consts = self.initial_const * np.ones((N,))
        lower_bounds = np.zeros((N,))
        upper_bounds = np.inf * np.ones((N,))

        best_advs = ep.zeros_like(x)
        best_advs_norms = ep.full(x, (N,), ep.inf)

        # the binary search searches for the smallest consts that produce adversarials
        for binary_search_step in range(self.binary_search_steps):
            if (
                binary_search_step == self.binary_search_steps - 1
                and self.binary_search_steps >= 10
            ):
                # in the last binary search step, repeat the search once
                consts = np.minimum(upper_bounds, 1e10)

            # create a new optimizer find the delta that minimizes the loss
            delta = ep.zeros_like(x_attack)
            optimizer = AdamOptimizer(delta, self.stepsize)

            # tracks whether adv with the current consts was found
            found_advs = np.full((N,), fill_value=False)
            loss_at_previous_check = np.inf

            consts_ = ep.from_numpy(x, consts.astype(np.float32))

            for step in range(self.steps):
                loss, (perturbed, logits), gradient = loss_aux_and_grad(delta, consts_)
                delta -= optimizer(gradient)

                if self.abort_early and step % (np.ceil(self.steps / 10)) == 0:
                    # after each tenth of the overall steps, check progress
                    if not (loss <= 0.9999 * loss_at_previous_check):
                        break  # stop Adam if there has been no progress
                    loss_at_previous_check = loss

                found_advs_iter = is_adversarial(perturbed, logits)
                found_advs = np.logical_or(found_advs, found_advs_iter.numpy())

                # keep the adversarial with the smallest L2 norm per sample
                norms = flatten(perturbed - x).norms.l2(axis=-1)
                closer = norms < best_advs_norms
                new_best = ep.logical_and(closer, found_advs_iter)

                new_best_ = atleast_kd(new_best, best_advs.ndim)
                best_advs = ep.where(new_best_, perturbed, best_advs)
                best_advs_norms = ep.where(new_best, norms, best_advs_norms)

            upper_bounds = np.where(found_advs, consts, upper_bounds)
            lower_bounds = np.where(found_advs, lower_bounds, consts)

            # grow exponentially while no upper bound is known, otherwise bisect
            consts_exponential_search = consts * 10
            consts_binary_search = (lower_bounds + upper_bounds) / 2
            consts = np.where(
                np.isinf(upper_bounds), consts_exponential_search, consts_binary_search
            )

        return restore_type(best_advs)
def best_other_classes(logits: ep.Tensor, exclude: ep.Tensor) -> ep.Tensor:
    """Return, per sample, the class with the highest logit other than the
    excluded class."""
    # push the excluded class to -inf so argmax can never select it
    masked_logits = logits - ep.onehot_like(logits, exclude, value=ep.inf)
    return masked_logits.argmax(axis=-1)
def _to_attack_space(x: ep.Tensor, *, bounds: Bounds) -> ep.Tensor:
    """Map x from model space [min_, max_] to the unbounded attack space."""
    min_, max_ = bounds
    center = (min_ + max_) / 2
    half_range = (max_ - min_) / 2
    # [min_, max_] -> [-1, +1]
    scaled = (x - center) / half_range
    # shrink slightly so arctanh stays finite: [-1, +1] -> approx. (-1, +1)
    shrunk = scaled * 0.999999
    # (-1, +1) -> (-inf, +inf)
    return shrunk.arctanh()
def _to_model_space(x: ep.Tensor, *, bounds: Bounds) -> ep.Tensor:
    """Map x from the unbounded attack space back to model space [min_, max_]."""
    min_, max_ = bounds
    center = (min_ + max_) / 2
    half_range = (max_ - min_) / 2
    # (-inf, +inf) -> (-1, +1), then rescale to [min_, max_]
    squashed = x.tanh()
    return squashed * half_range + center
| mit | e74812413b9c40075c72a9ff2932a35a | 35.748879 | 90 | 0.589262 | 3.719927 | false | false | false | false |
bethgelab/foolbox | foolbox/attacks/contrast_min.py | 1 | 4045 | from typing import Union, Any, Optional
import eagerpy as ep
from ..devutils import atleast_kd
from ..models import Model
from ..criteria import Criterion
from ..distances import Distance
from .base import FlexibleDistanceMinimizationAttack
from .base import T
from .base import get_is_adversarial
from .base import get_criterion
from .base import raise_if_kwargs
from .base import verify_input_bounds
class BinarySearchContrastReductionAttack(FlexibleDistanceMinimizationAttack):
    """Reduces the contrast of the input using a binary search to find the
    smallest adversarial perturbation

    Args:
        distance : Distance measure for which minimal adversarial examples are searched.
        binary_search_steps : Number of iterations in the binary search.
            This controls the precision of the results.
        target : Target relative to the bounds from 0 (min) to 1 (max)
            towards which the contrast is reduced
    """

    def __init__(
        self,
        *,
        distance: Optional[Distance] = None,
        binary_search_steps: int = 15,
        target: float = 0.5,
    ):
        super().__init__(distance=distance)
        self.binary_search_steps = binary_search_steps
        self.target = target

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Criterion, T],
        *,
        early_stop: Optional[float] = None,
        **kwargs: Any,
    ) -> T:
        """Returns perturbed inputs moved towards the contrast target."""
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        del inputs, kwargs

        verify_input_bounds(x, model)

        criterion = get_criterion(criterion)
        is_adversarial = get_is_adversarial(criterion, model)

        # absolute contrast target derived from the relative self.target
        min_, max_ = model.bounds
        target = min_ + self.target * (max_ - min_)
        direction = target - x

        # per-sample binary search over the interpolation factor epsilon
        lower_bound = ep.zeros(x, len(x))
        upper_bound = ep.ones(x, len(x))
        epsilons = lower_bound
        for _ in range(self.binary_search_steps):
            eps = atleast_kd(epsilons, x.ndim)
            is_adv = is_adversarial(x + eps * direction)
            lower_bound = ep.where(is_adv, lower_bound, epsilons)
            upper_bound = ep.where(is_adv, epsilons, upper_bound)
            epsilons = (lower_bound + upper_bound) / 2

        # the upper bound is the smallest epsilon known to be adversarial
        epsilons = upper_bound
        eps = atleast_kd(epsilons, x.ndim)
        xp = x + eps * direction
        return restore_type(xp)
class LinearSearchContrastReductionAttack(FlexibleDistanceMinimizationAttack):
    """Reduces the contrast of the input using a linear search to find the
    smallest adversarial perturbation"""

    def __init__(
        self,
        *,
        distance: Optional[Distance] = None,
        steps: int = 1000,
        target: float = 0.5,
    ):
        super().__init__(distance=distance)
        # number of evenly spaced epsilons tried between 0 and 1
        self.steps = steps
        # target relative to the bounds from 0 (min) to 1 (max)
        self.target = target

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Criterion, T],
        *,
        early_stop: Optional[float] = None,
        **kwargs: Any,
    ) -> T:
        """Returns perturbed inputs moved towards the contrast target."""
        raise_if_kwargs(kwargs)
        x, restore_type = ep.astensor_(inputs)
        del inputs, kwargs

        verify_input_bounds(x, model)

        criterion = get_criterion(criterion)
        is_adversarial = get_is_adversarial(criterion, model)

        # absolute contrast target derived from the relative self.target
        min_, max_ = model.bounds
        target = min_ + self.target * (max_ - min_)
        direction = target - x

        # best[i] == 1 means no adversarial epsilon found yet for sample i
        best = ep.ones(x, len(x))

        epsilon = 0.0
        stepsize = 1.0 / self.steps
        for _ in range(self.steps):
            # TODO: reduce the batch size to the ones that have not yet been sucessful
            is_adv = is_adversarial(x + epsilon * direction)
            is_best_adv = ep.logical_and(is_adv, best == 1)
            best = ep.where(is_best_adv, epsilon, best)

            if (best < 1).all():
                break  # pragma: no cover
            epsilon += stepsize

        eps = atleast_kd(best, x.ndim)
        xp = x + eps * direction
        return restore_type(xp)
| mit | 062ab19dbcbfad8cf55ef7af5af39001 | 28.742647 | 88 | 0.596292 | 3.816038 | false | false | false | false |
bethgelab/foolbox | tests/test_gen_attack_utils.py | 1 | 1233 | import eagerpy as ep
import numpy as np
import pytest
from typing import Any
from foolbox.attacks.gen_attack_utils import rescale_images
def test_rescale_axis(request: Any, dummy: ep.Tensor) -> None:
    """Check rescale_images on an NCHW batch against the NumPy reference."""
    backend = request.config.option.backend
    if backend == "numpy":
        # the numpy backend is the reference itself, nothing to compare
        pytest.skip()

    x_np = np.random.uniform(0.0, 1.0, size=(16, 3, 64, 64))
    x_np_ep = ep.astensor(x_np)
    x_up_np_ep = rescale_images(x_np_ep, (16, 3, 128, 128), 1)
    x_up_np = x_up_np_ep.numpy()

    x = ep.from_numpy(dummy, x_np)
    x_ep = ep.astensor(x)
    x_up_ep = rescale_images(x_ep, (16, 3, 128, 128), 1)
    x_up = x_up_ep.numpy()

    # interpolation may differ slightly between frameworks
    assert np.allclose(x_up_np, x_up, atol=1e-5)
def test_rescale_axis_nhwc(request: Any, dummy: ep.Tensor) -> None:
    """Check rescale_images on an NHWC batch against the NumPy reference."""
    backend = request.config.option.backend
    if backend == "numpy":
        # the numpy backend is the reference itself, nothing to compare
        pytest.skip()

    x_np = np.random.uniform(0.0, 1.0, size=(16, 64, 64, 3))
    x_np_ep = ep.astensor(x_np)
    x_up_np_ep = rescale_images(x_np_ep, (16, 128, 128, 3), -1)
    x_up_np = x_up_np_ep.numpy()

    x = ep.from_numpy(dummy, x_np)
    x_ep = ep.astensor(x)
    x_up_ep = rescale_images(x_ep, (16, 128, 128, 3), -1)
    x_up = x_up_ep.numpy()

    # interpolation may differ slightly between frameworks
    assert np.allclose(x_up_np, x_up, atol=1e-5)
| mit | 25ec81306ad391d187ca65d7ee96878e | 28.357143 | 67 | 0.605028 | 2.461078 | false | true | false | false |
bethgelab/foolbox | foolbox/models/base.py | 1 | 6321 | from typing import TypeVar, Callable, Optional, Tuple, Any
from abc import ABC, abstractmethod
import copy
import eagerpy as ep
from ..types import Bounds, BoundsInput, Preprocessing
from ..devutils import atleast_kd
T = TypeVar("T")
PreprocessArgs = Tuple[Optional[ep.Tensor], Optional[ep.Tensor], Optional[int]]
class Model(ABC):
    """Abstract base class for all models that can be attacked."""

    @property
    @abstractmethod
    def bounds(self) -> Bounds:
        """The valid (min, max) range of the model's input values."""
        ...

    @abstractmethod  # noqa: F811
    def __call__(self, inputs: T) -> T:
        """Passes inputs through the model and returns the model's output"""
        ...

    def transform_bounds(self, bounds: BoundsInput) -> "Model":
        """Returns a new model with the desired bounds and updates the preprocessing accordingly"""
        # subclasses can provide more efficient implementations
        return TransformBoundsWrapper(self, bounds)
class TransformBoundsWrapper(Model):
    """Wraps a model and linearly rescales inputs from the new bounds to
    the wrapped model's original bounds before each forward pass."""

    def __init__(self, model: Model, bounds: BoundsInput):
        self._model = model
        self._bounds = Bounds(*bounds)

    @property
    def bounds(self) -> Bounds:
        return self._bounds

    def __call__(self, inputs: T) -> T:
        x, restore_type = ep.astensor_(inputs)
        y = self._preprocess(x)
        z = self._model(y)
        return restore_type(z)

    def transform_bounds(self, bounds: BoundsInput, inplace: bool = False) -> Model:
        if inplace:
            self._bounds = Bounds(*bounds)
            return self
        else:
            # using the wrapped model instead of self to avoid
            # unnecessary sequences of wrappers
            return TransformBoundsWrapper(self._model, bounds)

    def _preprocess(self, inputs: ep.TensorType) -> ep.TensorType:
        # no rescaling needed when the bounds already match
        if self.bounds == self._model.bounds:
            return inputs

        # from bounds to (0, 1)
        min_, max_ = self.bounds
        x = (inputs - min_) / (max_ - min_)

        # from (0, 1) to wrapped model bounds
        min_, max_ = self._model.bounds
        return x * (max_ - min_) + min_

    @property
    def data_format(self) -> Any:
        return self._model.data_format  # type: ignore
ModelType = TypeVar("ModelType", bound="ModelWithPreprocessing")
class ModelWithPreprocessing(Model):
    """Wraps a callable (native) model and applies flip/mean/std
    preprocessing to the inputs before every forward pass."""

    def __init__(  # type: ignore
        self,
        model: Callable[..., ep.types.NativeTensor],
        bounds: BoundsInput,
        dummy: ep.Tensor,
        preprocessing: Preprocessing = None,
    ):
        if not callable(model):
            raise ValueError("expected model to be callable")  # pragma: no cover

        self._model = model
        self._bounds = Bounds(*bounds)
        # dummy tensor that determines the framework/device for conversions
        self._dummy = dummy
        self._preprocess_args = self._process_preprocessing(preprocessing)

    @property
    def bounds(self) -> Bounds:
        return self._bounds

    @property
    def dummy(self) -> ep.Tensor:
        return self._dummy

    def __call__(self, inputs: T) -> T:
        x, restore_type = ep.astensor_(inputs)
        y = self._preprocess(x)
        # the wrapped model operates on native (raw) tensors
        z = ep.astensor(self._model(y.raw))
        return restore_type(z)

    def transform_bounds(
        self,
        bounds: BoundsInput,
        inplace: bool = False,
        wrapper: bool = False,
    ) -> Model:
        """Returns a new model with the desired bounds and updates the preprocessing accordingly"""
        # more efficient than the base class implementation because it avoids the additional wrapper
        if wrapper:
            if inplace:
                raise ValueError("inplace and wrapper cannot both be True")
            return super().transform_bounds(bounds)

        if self.bounds == bounds:
            if inplace:
                return self
            else:
                return copy.copy(self)

        # fold the linear rescaling from the old to the new bounds into
        # the existing mean/std preprocessing arguments
        a, b = self.bounds
        c, d = bounds
        f = (d - c) / (b - a)

        mean, std, flip_axis = self._preprocess_args

        if mean is None:
            mean = ep.zeros(self._dummy, 1)
        mean = f * (mean - a) + c

        if std is None:
            std = ep.ones(self._dummy, 1)
        std = f * std

        if inplace:
            model = self
        else:
            model = copy.copy(self)
        model._bounds = Bounds(*bounds)
        model._preprocess_args = (mean, std, flip_axis)
        return model

    def _preprocess(self, inputs: ep.Tensor) -> ep.Tensor:
        # applies (optional) channel flip, mean subtraction and std division
        mean, std, flip_axis = self._preprocess_args
        x = inputs
        if flip_axis is not None:
            x = x.flip(axis=flip_axis)
        if mean is not None:
            x = x - mean
        if std is not None:
            x = x / std
        assert x.dtype == inputs.dtype
        return x

    def _process_preprocessing(self, preprocessing: Preprocessing) -> PreprocessArgs:
        # validates the preprocessing dict and converts mean/std to tensors
        if preprocessing is None:
            preprocessing = dict()

        unsupported = set(preprocessing.keys()) - {"mean", "std", "axis", "flip_axis"}
        if len(unsupported) > 0:
            raise ValueError(f"unknown preprocessing key: {unsupported.pop()}")

        mean = preprocessing.get("mean", None)
        std = preprocessing.get("std", None)
        axis = preprocessing.get("axis", None)
        flip_axis = preprocessing.get("flip_axis", None)

        def to_tensor(x: Any) -> Optional[ep.Tensor]:
            if x is None:
                return None
            if isinstance(x, ep.Tensor):
                return x
            try:
                y = ep.astensor(x)  # might raise ValueError
                if not isinstance(y, type(self._dummy)):
                    raise ValueError
                return y
            except ValueError:
                return ep.from_numpy(self._dummy, x)

        mean_ = to_tensor(mean)
        std_ = to_tensor(std)

        def apply_axis(x: Optional[ep.Tensor], axis: int) -> Optional[ep.Tensor]:
            # broadcasts a 1D tensor along the given (negative) axis
            if x is None:
                return None
            if x.ndim != 1:
                raise ValueError(f"non-None axis requires a 1D tensor, got {x.ndim}D")
            if axis >= 0:
                raise ValueError(
                    "expected axis to be None or negative, -1 refers to the last axis"
                )
            return atleast_kd(x, -axis)

        if axis is not None:
            mean_ = apply_axis(mean_, axis)
            std_ = apply_axis(std_, axis)

        return mean_, std_, flip_axis
| mit | ee2407bad57ea145f3b3b2cbad1df938 | 30.137931 | 100 | 0.5651 | 4.175033 | false | false | false | false |
bethgelab/foolbox | foolbox/distances.py | 1 | 2412 | from abc import ABC, abstractmethod
from typing import TypeVar
import eagerpy as ep
from .devutils import flatten
from .devutils import atleast_kd
T = TypeVar("T")
class Distance(ABC):
    """Abstract base class for distance measures between batches of inputs."""

    @abstractmethod
    def __call__(self, reference: T, perturbed: T) -> T:
        """Returns the distances from reference to perturbed (one per sample)."""
        ...

    @abstractmethod
    def clip_perturbation(self, references: T, perturbed: T, epsilon: float) -> T:
        """Returns perturbed with its perturbation clipped to size epsilon."""
        ...
class LpDistance(Distance):
    """Distance measure based on the Lp norm of the perturbation."""

    def __init__(self, p: float):
        # the order of the norm, e.g. 0, 1, 2 or ep.inf
        self.p = p

    def __repr__(self) -> str:
        return f"LpDistance({self.p})"

    def __str__(self) -> str:
        return f"L{self.p} distance"

    def __call__(self, references: T, perturbed: T) -> T:
        """Calculates the distances from references to perturbed using the Lp norm.

        Args:
            references: A batch of reference inputs.
            perturbed: A batch of perturbed inputs.

        Returns:
            A 1D tensor with the distances from references to perturbed.
        """
        (x, y), restore_type = ep.astensors_(references, perturbed)
        norms = ep.norms.lp(flatten(y - x), self.p, axis=-1)
        return restore_type(norms)

    def clip_perturbation(self, references: T, perturbed: T, epsilon: float) -> T:
        """Clips the perturbations to epsilon and returns the new perturbed

        Args:
            references: A batch of reference inputs.
            perturbed: A batch of perturbed inputs.

        Returns:
            A tensor like perturbed but with the perturbation clipped to epsilon.
        """
        (x, y), restore_type = ep.astensors_(references, perturbed)
        p = y - x
        if self.p == ep.inf:
            # Linf clipping is a simple elementwise clamp
            clipped_perturbation = ep.clip(p, -epsilon, epsilon)
            return restore_type(x + clipped_perturbation)
        norms = ep.norms.lp(flatten(p), self.p, axis=-1)
        norms = ep.maximum(norms, 1e-12)  # avoid division by zero
        factor = epsilon / norms
        factor = ep.minimum(1, factor)  # clipping -> decreasing but not increasing

        if self.p == 0:
            if (factor == 1).all():
                return perturbed
            raise NotImplementedError("reducing L0 norms not yet supported")

        factor = atleast_kd(factor, x.ndim)
        clipped_perturbation = factor * p
        return restore_type(x + clipped_perturbation)
# commonly used distances, exposed as module-level singletons
l0 = LpDistance(0)
l1 = LpDistance(1)
l2 = LpDistance(2)
linf = LpDistance(ep.inf)
| mit | 1c1c8429df450bb0f35b47568d14943f | 30.324675 | 83 | 0.606136 | 3.573333 | false | false | false | false |
bethgelab/foolbox | examples/multiple_attacks_pytorch_resnet18.py | 1 | 2313 | #!/usr/bin/env python3
import torchvision.models as models
import eagerpy as ep
from foolbox import PyTorchModel, accuracy, samples
import foolbox.attacks as fa
import numpy as np
if __name__ == "__main__":
    # instantiate a model (could also be a TensorFlow or JAX model)
    model = models.resnet18(pretrained=True).eval()
    preprocessing = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3)
    fmodel = PyTorchModel(model, bounds=(0, 1), preprocessing=preprocessing)

    # get data and test the model
    # wrapping the tensors with ep.astensors is optional, but it allows
    # us to work with EagerPy tensors in the following
    images, labels = ep.astensors(*samples(fmodel, dataset="imagenet", batchsize=16))
    clean_acc = accuracy(fmodel, images, labels)
    print(f"clean accuracy: {clean_acc * 100:.1f} %")
    print("")

    attacks = [
        fa.FGSM(),
        fa.LinfPGD(),
        fa.LinfBasicIterativeAttack(),
        fa.LinfAdditiveUniformNoiseAttack(),
        fa.LinfDeepFoolAttack(),
    ]

    epsilons = [
        0.0,
        0.0005,
        0.001,
        0.0015,
        0.002,
        0.003,
        0.005,
        0.01,
        0.02,
        0.03,
        0.1,
        0.3,
        0.5,
        1.0,
    ]
    print("epsilons")
    print(epsilons)
    print("")

    # np.bool was deprecated in NumPy 1.20 and removed in NumPy 1.24;
    # the builtin bool is the correct dtype here
    attack_success = np.zeros((len(attacks), len(epsilons), len(images)), dtype=bool)
    for i, attack in enumerate(attacks):
        _, _, success = attack(fmodel, images, labels, epsilons=epsilons)
        assert success.shape == (len(epsilons), len(images))
        success_ = success.numpy()
        assert success_.dtype == bool
        attack_success[i] = success_
        print(attack)
        print(" ", 1.0 - success_.mean(axis=-1).round(2))
# calculate and report the robust accuracy (the accuracy of the model when
# it is attacked) using the best attack per sample
robust_accuracy = 1.0 - attack_success.max(axis=0).mean(axis=-1)
print("")
print("-" * 79)
print("")
print("worst case (best attack per-sample)")
print(" ", robust_accuracy.round(2))
print("")
print("robust accuracy for perturbations with")
for eps, acc in zip(epsilons, robust_accuracy):
print(f" Linf norm ≤ {eps:<6}: {acc.item() * 100:4.1f} %")
| mit | 5c3b3e0ec5c1849994597acfd332db14 | 30.657534 | 88 | 0.603635 | 3.349275 | false | false | false | false |
bethgelab/foolbox | foolbox/attacks/gradient_descent_base.py | 1 | 8297 | from typing import Union, Any, Optional, Callable, Tuple
from abc import ABC, abstractmethod
import eagerpy as ep
from ..devutils import flatten
from ..devutils import atleast_kd
from ..types import Bounds
from ..models.base import Model
from ..criteria import Misclassification, TargetedMisclassification
from ..distances import l1, l2, linf
from .base import FixedEpsilonAttack
from .base import T
from .base import get_criterion
from .base import raise_if_kwargs
from .base import verify_input_bounds
class Optimizer(ABC):
    """Interface for gradient-based update rules used by the attacks."""

    def __init__(self, x: ep.Tensor):
        pass

    @abstractmethod
    def __call__(self, gradient: ep.Tensor) -> ep.Tensor:
        """Returns the update step for the given gradient."""
        pass
class AdamOptimizer(Optimizer):
    """Adam update rule with bias-corrected first and second moment
    estimates of the gradient."""

    def __init__(
        self,
        x: ep.Tensor,
        stepsize: float,
        beta1: float = 0.9,
        beta2: float = 0.999,
        epsilon: float = 1e-8,
    ):
        self.stepsize = stepsize
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

        # first/second moment accumulators and the step counter
        self.m = ep.zeros_like(x)
        self.v = ep.zeros_like(x)
        self.t = 0

    def __call__(self, gradient: ep.Tensor) -> ep.Tensor:
        self.t += 1

        # exponential moving averages of the gradient and its square
        self.m = self.beta1 * self.m + (1 - self.beta1) * gradient
        self.v = self.beta2 * self.v + (1 - self.beta2) * gradient**2

        # correct the zero-initialization bias of the moment estimates
        bias_correction_1 = 1 - self.beta1**self.t
        bias_correction_2 = 1 - self.beta2**self.t

        m_hat = self.m / bias_correction_1
        v_hat = self.v / bias_correction_2

        return self.stepsize * m_hat / (ep.sqrt(v_hat) + self.epsilon)
class GDOptimizer(Optimizer):
    """Plain gradient descent: the update is the gradient times the stepsize."""

    def __init__(self, x: ep.Tensor, stepsize: float):
        self.stepsize = stepsize

    def __call__(
        self,
        gradient: ep.Tensor,
    ) -> ep.Tensor:
        return self.stepsize * gradient
class BaseGradientDescent(FixedEpsilonAttack, ABC):
    """Base class for (projected) gradient descent attacks with a fixed
    perturbation budget epsilon.

    Subclasses define the norm-specific pieces: the random initialization
    inside the epsilon ball, the gradient normalization and the projection
    back onto the epsilon ball.
    """

    def __init__(
        self,
        *,
        rel_stepsize: float,
        abs_stepsize: Optional[float] = None,
        steps: int,
        random_start: bool,
    ):
        self.rel_stepsize = rel_stepsize
        self.abs_stepsize = abs_stepsize
        self.steps = steps
        self.random_start = random_start

    def get_loss_fn(
        self, model: Model, labels: ep.Tensor
    ) -> Callable[[ep.Tensor], ep.Tensor]:
        # can be overridden by users
        def loss_fn(inputs: ep.Tensor) -> ep.Tensor:
            logits = model(inputs)
            return ep.crossentropy(logits, labels).sum()

        return loss_fn

    def get_optimizer(self, x: ep.Tensor, stepsize: float) -> Optimizer:
        # can be overridden by users
        return GDOptimizer(x, stepsize)

    def value_and_grad(
        # can be overridden by users
        self,
        loss_fn: Callable[[ep.Tensor], ep.Tensor],
        x: ep.Tensor,
    ) -> Tuple[ep.Tensor, ep.Tensor]:
        return ep.value_and_grad(loss_fn, x)

    def run(
        self,
        model: Model,
        inputs: T,
        criterion: Union[Misclassification, TargetedMisclassification, T],
        *,
        epsilon: float,
        **kwargs: Any,
    ) -> T:
        """Runs the attack and returns the perturbed inputs."""
        raise_if_kwargs(kwargs)
        x0, restore_type = ep.astensor_(inputs)
        criterion_ = get_criterion(criterion)
        del inputs, criterion, kwargs

        verify_input_bounds(x0, model)

        # perform a gradient ascent (targeted attack) or descent (untargeted attack)
        if isinstance(criterion_, Misclassification):
            gradient_step_sign = 1.0
            classes = criterion_.labels
        elif hasattr(criterion_, "target_classes"):
            gradient_step_sign = -1.0
            classes = criterion_.target_classes  # type: ignore
        else:
            raise ValueError("unsupported criterion")

        loss_fn = self.get_loss_fn(model, classes)

        # the relative stepsize is interpreted as a fraction of epsilon
        if self.abs_stepsize is None:
            stepsize = self.rel_stepsize * epsilon
        else:
            stepsize = self.abs_stepsize

        optimizer = self.get_optimizer(x0, stepsize)

        if self.random_start:
            x = self.get_random_start(x0, epsilon)
            x = ep.clip(x, *model.bounds)
        else:
            x = x0

        for _ in range(self.steps):
            _, gradients = self.value_and_grad(loss_fn, x)
            gradients = self.normalize(gradients, x=x, bounds=model.bounds)
            x = x + gradient_step_sign * optimizer(gradients)
            # stay within the epsilon ball and the model's input bounds
            x = self.project(x, x0, epsilon)
            x = ep.clip(x, *model.bounds)

        return restore_type(x)

    @abstractmethod
    def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        """Returns a random starting point inside the epsilon ball around x0."""
        ...

    @abstractmethod
    def normalize(
        self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
    ) -> ep.Tensor:
        """Returns the gradients normalized for the attack's norm."""
        ...

    @abstractmethod
    def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
        """Projects x back onto the epsilon ball around x0."""
        ...
def clip_lp_norms(x: ep.Tensor, *, norm: float, p: float) -> ep.Tensor:
assert 0 < p < ep.inf
norms = flatten(x).norms.lp(p=p, axis=-1)
norms = ep.maximum(norms, 1e-12) # avoid divsion by zero
factor = ep.minimum(1, norm / norms) # clipping -> decreasing but not increasing
factor = atleast_kd(factor, x.ndim)
return x * factor
def normalize_lp_norms(x: ep.Tensor, *, p: float) -> ep.Tensor:
assert 0 < p < ep.inf
norms = flatten(x).norms.lp(p=p, axis=-1)
norms = ep.maximum(norms, 1e-12) # avoid divsion by zero
factor = 1 / norms
factor = atleast_kd(factor, x.ndim)
return x * factor
def uniform_l1_n_balls(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
# https://mathoverflow.net/a/9188
u = ep.uniform(dummy, (batch_size, n))
v = u.sort(axis=-1)
vp = ep.concatenate([ep.zeros(v, (batch_size, 1)), v[:, : n - 1]], axis=-1)
assert v.shape == vp.shape
x = v - vp
sign = ep.uniform(dummy, (batch_size, n), low=-1.0, high=1.0).sign()
return sign * x
def uniform_l2_n_spheres(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
x = ep.normal(dummy, (batch_size, n + 1))
r = x.norms.l2(axis=-1, keepdims=True)
s = x / r
return s
def uniform_l2_n_balls(dummy: ep.Tensor, batch_size: int, n: int) -> ep.Tensor:
"""Sampling from the n-ball
Implementation of the algorithm proposed by Voelker et al. [#Voel17]_
References:
.. [#Voel17] Voelker et al., 2017, Efficiently sampling vectors and coordinates
from the n-sphere and n-ball
http://compneuro.uwaterloo.ca/files/publications/voelker.2017.pdf
"""
s = uniform_l2_n_spheres(dummy, batch_size, n + 1)
b = s[:, :n]
return b
class L1BaseGradientDescent(BaseGradientDescent):
distance = l1
def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
batch_size, n = flatten(x0).shape
r = uniform_l1_n_balls(x0, batch_size, n).reshape(x0.shape)
return x0 + epsilon * r
def normalize(
self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
) -> ep.Tensor:
return normalize_lp_norms(gradients, p=1)
def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
return x0 + clip_lp_norms(x - x0, norm=epsilon, p=1)
class L2BaseGradientDescent(BaseGradientDescent):
distance = l2
def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
batch_size, n = flatten(x0).shape
r = uniform_l2_n_balls(x0, batch_size, n).reshape(x0.shape)
return x0 + epsilon * r
def normalize(
self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
) -> ep.Tensor:
return normalize_lp_norms(gradients, p=2)
def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
return x0 + clip_lp_norms(x - x0, norm=epsilon, p=2)
class LinfBaseGradientDescent(BaseGradientDescent):
distance = linf
def get_random_start(self, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
return x0 + ep.uniform(x0, x0.shape, -epsilon, epsilon)
def normalize(
self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
) -> ep.Tensor:
return gradients.sign()
def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
return x0 + ep.clip(x - x0, -epsilon, epsilon)
| mit | 0ca5b821b31fd4aac2596873731d9427 | 29.170909 | 87 | 0.601181 | 3.338833 | false | false | false | false |
bethgelab/foolbox | tests/test_brendel_bethge_attack.py | 1 | 1834 | from typing import Tuple, Union, List, Any
import eagerpy as ep
import foolbox as fbn
import foolbox.attacks as fa
from foolbox.devutils import flatten
from foolbox.attacks.brendel_bethge import BrendelBethgeAttack
import pytest
from conftest import ModeAndDataAndDescription
def get_attack_id(x: Tuple[BrendelBethgeAttack, Union[int, float]]) -> str:
return repr(x[0])
attacks: List[Tuple[fa.Attack, Union[int, float]]] = [
(fa.L0BrendelBethgeAttack(steps=20), 0),
(fa.L1BrendelBethgeAttack(steps=20), 1),
(fa.L2BrendelBethgeAttack(steps=20), 2),
(fa.LinfinityBrendelBethgeAttack(steps=20), ep.inf),
]
@pytest.mark.parametrize("attack_and_p", attacks, ids=get_attack_id)
def test_brendel_bethge_untargeted_attack(
request: Any,
fmodel_and_data_ext_for_attacks: ModeAndDataAndDescription,
attack_and_p: Tuple[BrendelBethgeAttack, Union[int, float]],
) -> None:
if request.config.option.skipslow:
pytest.skip()
(fmodel, x, y), real, low_dimensional_input = fmodel_and_data_ext_for_attacks
if isinstance(x, ep.NumPyTensor):
pytest.skip()
if low_dimensional_input:
pytest.skip()
x = (x - fmodel.bounds.lower) / (fmodel.bounds.upper - fmodel.bounds.lower)
fmodel = fmodel.transform_bounds((0, 1))
init_attack = fa.DatasetAttack()
init_attack.feed(fmodel, x)
init_advs = init_attack.run(fmodel, x, y)
attack, p = attack_and_p
advs = attack.run(fmodel, x, y, starting_points=init_advs)
init_norms = ep.norms.lp(flatten(init_advs - x), p=p, axis=-1)
norms = ep.norms.lp(flatten(advs - x), p=p, axis=-1)
is_smaller = norms < init_norms
assert fbn.accuracy(fmodel, advs, y) < fbn.accuracy(fmodel, x, y)
assert fbn.accuracy(fmodel, advs, y) <= fbn.accuracy(fmodel, init_advs, y)
assert is_smaller.any()
| mit | 5f192d5309ebf13c016066db2944debf | 30.084746 | 81 | 0.692475 | 2.778788 | false | true | false | false |
bethgelab/foolbox | foolbox/attacks/sparse_l1_descent_attack.py | 1 | 3358 | from typing import Optional
import eagerpy as ep
import numpy as np
from ..devutils import flatten
from ..devutils import atleast_kd
from ..types import Bounds
from .gradient_descent_base import L1BaseGradientDescent
from .gradient_descent_base import normalize_lp_norms
class SparseL1DescentAttack(L1BaseGradientDescent):
"""Sparse L1 Descent Attack [#Tra19]_.
Args:
rel_stepsize: Stepsize relative to epsilon.
abs_stepsize: If given, it takes precedence over rel_stepsize.
steps : Number of update steps.
random_start : Controls whether to randomly start within allowed epsilon ball.
References:
.. [#Tra19] Florian Tramèr, Dan Boneh, "Adversarial Training and
Robustness for Multiple Perturbations"
https://arxiv.org/abs/1904.13000
"""
def normalize(
self, gradients: ep.Tensor, *, x: ep.Tensor, bounds: Bounds
) -> ep.Tensor:
bad_pos = ep.logical_or(
ep.logical_and(x == bounds.lower, gradients < 0),
ep.logical_and(x == bounds.upper, gradients > 0),
)
gradients = ep.where(bad_pos, ep.zeros_like(gradients), gradients)
abs_gradients = gradients.abs()
quantiles = np.quantile(
flatten(abs_gradients).numpy(), q=self.quantile, axis=-1
)
keep = abs_gradients >= atleast_kd(
ep.from_numpy(gradients, quantiles), gradients.ndim
)
e = ep.where(keep, gradients.sign(), ep.zeros_like(gradients))
return normalize_lp_norms(e, p=1)
def project(self, x: ep.Tensor, x0: ep.Tensor, epsilon: float) -> ep.Tensor:
# based on https://github.com/ftramer/MultiRobustness/blob/ad41b63235d13b1b2a177c5f270ab9afa74eee69/pgd_attack.py#L110
delta = flatten(x - x0)
norms = delta.norms.l1(axis=-1)
if (norms <= epsilon).all():
return x
n, d = delta.shape
abs_delta = abs(delta)
mu = -ep.sort(-abs_delta, axis=-1)
cumsums = mu.cumsum(axis=-1)
js = 1.0 / ep.arange(x, 1, d + 1).astype(x.dtype)
temp = mu - js * (cumsums - epsilon)
guarantee_first = ep.arange(x, d).astype(x.dtype) / d
# guarantee_first are small values (< 1) that we add to the boolean
# tensor (only 0 and 1) to break the ties and always return the first
# argmin, i.e. the first value where the boolean tensor is 0
# (otherwise, this is not guaranteed on GPUs, see e.g. PyTorch)
rho = ep.argmin((temp > 0).astype(x.dtype) + guarantee_first, axis=-1)
theta = 1.0 / (1 + rho.astype(x.dtype)) * (cumsums[range(n), rho] - epsilon)
delta = delta.sign() * ep.maximum(abs_delta - theta[..., ep.newaxis], 0)
delta = delta.reshape(x.shape)
return x0 + delta
def __init__(
self,
*,
quantile: float = 0.99,
rel_stepsize: float = 0.2,
abs_stepsize: Optional[float] = None,
steps: int = 10,
random_start: bool = False,
):
super().__init__(
rel_stepsize=rel_stepsize,
abs_stepsize=abs_stepsize,
steps=steps,
random_start=random_start,
)
if not 0 <= quantile <= 1:
raise ValueError(f"quantile needs to be between 0 and 1, got {quantile}")
self.quantile = quantile
| mit | 2b87e0569324d009547be808da50b0bf | 36.719101 | 126 | 0.605302 | 3.526261 | false | false | false | false |
bethgelab/foolbox | foolbox/attacks/blended_noise.py | 1 | 3238 | from typing import Union, Optional, Any
import numpy as np
import eagerpy as ep
from ..devutils import atleast_kd
from ..distances import Distance
from .base import FlexibleDistanceMinimizationAttack
from .base import Model
from .base import Criterion
from .base import T
from .base import get_is_adversarial
from .base import get_criterion
from .base import raise_if_kwargs
import warnings
from .base import verify_input_bounds
class LinearSearchBlendedUniformNoiseAttack(FlexibleDistanceMinimizationAttack):
"""Blends the input with a uniform noise input until it is misclassified.
Args:
distance : Distance measure for which minimal adversarial examples are searched.
directions : Number of random directions in which the perturbation is searched.
steps : Number of blending steps between the original image and the random
directions.
"""
def __init__(
self,
*,
distance: Optional[Distance] = None,
directions: int = 1000,
steps: int = 1000,
):
super().__init__(distance=distance)
self.directions = directions
self.steps = steps
if directions <= 0:
raise ValueError("directions must be larger than 0")
def run(
self,
model: Model,
inputs: T,
criterion: Union[Criterion, Any] = None,
*,
early_stop: Optional[float] = None,
**kwargs: Any,
) -> T:
raise_if_kwargs(kwargs)
x, restore_type = ep.astensor_(inputs)
criterion_ = get_criterion(criterion)
del inputs, criterion, kwargs
verify_input_bounds(x, model)
is_adversarial = get_is_adversarial(criterion_, model)
min_, max_ = model.bounds
N = len(x)
for j in range(self.directions):
# random noise inputs tend to be classified into the same class,
# so we might need to make very many draws if the original class
# is that one
random_ = ep.uniform(x, x.shape, min_, max_)
is_adv_ = atleast_kd(is_adversarial(random_), x.ndim)
if j == 0:
random = random_
is_adv = is_adv_
else:
random = ep.where(is_adv, random, random_)
is_adv = is_adv.logical_or(is_adv_)
if is_adv.all():
break
if not is_adv.all():
warnings.warn(
f"{self.__class__.__name__} failed to draw sufficient random"
f" inputs that are adversarial ({is_adv.sum()} / {N})."
)
x0 = x
epsilons = np.linspace(0, 1, num=self.steps + 1, dtype=np.float32)
best = ep.ones(x, (N,))
for epsilon in epsilons:
x = (1 - epsilon) * x0 + epsilon * random
# TODO: due to limited floating point precision, clipping can be required
is_adv = is_adversarial(x)
epsilon = epsilon.item()
best = ep.minimum(ep.where(is_adv, epsilon, 1.0), best)
if (best < 1).all():
break
best = atleast_kd(best, x0.ndim)
x = (1 - best) * x0 + best * random
return restore_type(x)
| mit | d02b58d9437c3ea4fa8d5edb99269ab1 | 28.436364 | 88 | 0.579679 | 3.968137 | false | false | false | false |
bethgelab/foolbox | foolbox/external/clipping_aware_rescaling.py | 1 | 2461 | # Copyright (c) 2020, Jonas Rauber
#
# Licensed under the BSD 3-Clause License
#
# Last changed:
# * 2020-07-15
# * 2020-01-08
# * 2019-04-18
import eagerpy as ep
def l2_clipping_aware_rescaling(x, delta, eps: float, a: float = 0.0, b: float = 1.0): # type: ignore
"""Calculates eta such that norm(clip(x + eta * delta, a, b) - x) == eps.
Assumes x and delta have a batch dimension and eps, a, b, and p are
scalars. If the equation cannot be solved because eps is too large, the
left hand side is maximized.
Args:
x: A batch of inputs (PyTorch Tensor, TensorFlow Eager Tensor, NumPy
Array, JAX Array, or EagerPy Tensor).
delta: A batch of perturbation directions (same shape and type as x).
eps: The target norm (non-negative float).
a: The lower bound of the data domain (float).
b: The upper bound of the data domain (float).
Returns:
eta: A batch of scales with the same number of dimensions as x but all
axis == 1 except for the batch dimension.
"""
(x, delta), restore_fn = ep.astensors_(x, delta)
N = x.shape[0]
assert delta.shape[0] == N
rows = ep.arange(x, N)
delta2 = delta.square().reshape((N, -1))
space = ep.where(delta >= 0, b - x, x - a).reshape((N, -1))
f2 = space.square() / ep.maximum(delta2, 1e-20)
ks = ep.argsort(f2, axis=-1)
f2_sorted = f2[rows[:, ep.newaxis], ks]
m = ep.cumsum(delta2[rows[:, ep.newaxis], ks.flip(axis=1)], axis=-1).flip(axis=1)
dx = f2_sorted[:, 1:] - f2_sorted[:, :-1]
dx = ep.concatenate((f2_sorted[:, :1], dx), axis=-1)
dy = m * dx
y = ep.cumsum(dy, axis=-1)
c = y >= eps**2
# work-around to get first nonzero element in each row
f = ep.arange(x, c.shape[-1], 0, -1)
j = ep.argmax(c.astype(f.dtype) * f, axis=-1)
eta2 = f2_sorted[rows, j] - (y[rows, j] - eps**2) / m[rows, j]
# it can happen that for certain rows even the largest j is not large enough
# (i.e. c[:, -1] is False), then we will just use it (without any correction) as it's
# the best we can do (this should also be the only cases where m[j] can be
# 0 and they are thus not a problem)
eta2 = ep.where(c[:, -1], eta2, f2_sorted[:, -1])
eta = ep.sqrt(eta2)
eta = eta.reshape((-1,) + (1,) * (x.ndim - 1))
# xp = ep.clip(x + eta * delta, a, b)
# l2 = (xp - x).reshape((N, -1)).square().sum(axis=-1).sqrt()
return restore_fn(eta)
| mit | d7bd171f1beb94bbf1398f7ea05bf457 | 37.453125 | 102 | 0.595693 | 3.00122 | false | false | false | false |
bethgelab/foolbox | foolbox/attacks/binarization.py | 1 | 3895 | from typing import Union, Optional, Any
from typing_extensions import Literal
import eagerpy as ep
import numpy as np
from ..models import Model
from ..criteria import Criterion
from ..distances import Distance
from .base import FlexibleDistanceMinimizationAttack
from .base import T
from .base import get_is_adversarial
from .base import get_criterion
from .base import raise_if_kwargs
from .base import verify_input_bounds
class BinarizationRefinementAttack(FlexibleDistanceMinimizationAttack):
"""For models that preprocess their inputs by binarizing the
inputs, this attack can improve adversarials found by other
attacks. It does this by utilizing information about the
binarization and mapping values to the corresponding value in
the clean input or to the right side of the threshold.
Args:
threshold : The threshold used by the models binarization. If none,
defaults to (model.bounds()[1] - model.bounds()[0]) / 2.
included_in : Whether the threshold value itself belongs to the lower or
upper interval.
"""
def __init__(
self,
*,
distance: Optional[Distance] = None,
threshold: Optional[float] = None,
included_in: Union[Literal["lower"], Literal["upper"]] = "upper",
):
super().__init__(distance=distance)
self.threshold = threshold
self.included_in = included_in
def run(
self,
model: Model,
inputs: T,
criterion: Union[Criterion, T],
*,
early_stop: Optional[float] = None,
starting_points: Optional[T] = None,
**kwargs: Any,
) -> T:
raise_if_kwargs(kwargs)
if starting_points is None:
raise ValueError("BinarizationRefinementAttack requires starting_points")
(o, x), restore_type = ep.astensors_(inputs, starting_points)
del inputs, starting_points, kwargs
verify_input_bounds(x, model)
criterion = get_criterion(criterion)
is_adversarial = get_is_adversarial(criterion, model)
if self.threshold is None:
min_, max_ = model.bounds
threshold = (min_ + max_) / 2.0
else:
threshold = self.threshold
assert o.dtype == x.dtype
nptype = o.reshape(-1)[0].numpy().dtype.type
if nptype not in [np.float16, np.float32, np.float64]:
raise ValueError( # pragma: no cover
f"expected dtype to be float16, float32 or float64, found '{nptype}'"
)
threshold = nptype(threshold)
offset = nptype(1.0)
if self.included_in == "lower":
lower_ = threshold
upper_ = np.nextafter(threshold, threshold + offset)
elif self.included_in == "upper":
lower_ = np.nextafter(threshold, threshold - offset)
upper_ = threshold
else:
raise ValueError(
f"expected included_in to be 'lower' or 'upper', found '{self.included_in}'"
)
assert lower_ < upper_
p = ep.full_like(o, ep.nan)
lower = ep.ones_like(o) * lower_
upper = ep.ones_like(o) * upper_
indices = ep.logical_and(o <= lower, x <= lower)
p = ep.where(indices, o, p)
indices = ep.logical_and(o <= lower, x >= upper)
p = ep.where(indices, upper, p)
indices = ep.logical_and(o >= upper, x <= lower)
p = ep.where(indices, lower, p)
indices = ep.logical_and(o >= upper, x >= upper)
p = ep.where(indices, o, p)
assert not ep.any(ep.isnan(p))
is_adv1 = is_adversarial(x)
is_adv2 = is_adversarial(p)
if (is_adv1 != is_adv2).any():
raise ValueError(
"The specified threshold does not match what is done by the model."
)
return restore_type(p)
| mit | acca6d45e7ea9560c43bbdd1f0b3c4b4 | 31.190083 | 92 | 0.601797 | 3.918511 | false | false | false | false |
egnyte/gitlabform | gitlabform/ui.py | 1 | 6334 | import sys
from typing import Any
import luddite
import pkg_resources
from cli_ui import (
message,
info,
info_1,
error,
fatal,
reset,
green,
purple,
blue,
red,
yellow,
Symbol,
Token,
)
from cli_ui import debug as verbose
from packaging import version as packaging_version
from urllib.error import URLError
from gitlabform import EXIT_PROCESSING_ERROR, EXIT_INVALID_INPUT, Entities
def show_version(skip_version_check: bool):
local_version = pkg_resources.get_distribution("gitlabform").version
tower_crane = Symbol("🏗", "")
tokens_to_show = [
reset,
tower_crane,
" GitLabForm version:",
blue,
local_version,
reset,
]
message(*tokens_to_show, end="")
if skip_version_check:
# just print end of the line
print()
else:
try:
latest_version = luddite.get_version_pypi("gitlabform")
except URLError as e:
# end the line with current version
print()
error(f"Checking latest version failed:\n{e}")
return
if local_version == latest_version:
happy = Symbol("😊", "")
tokens_to_show = [
"= the latest stable ",
happy,
]
elif packaging_version.parse(local_version) < packaging_version.parse(
latest_version
):
sad = Symbol("😔", "")
tokens_to_show = [
"= outdated ",
sad,
f", please update! (the latest stable is ",
blue,
latest_version,
reset,
")",
]
else:
excited = Symbol("🤩", "")
tokens_to_show = [
"= pre-release ",
excited,
f" (the latest stable is ",
blue,
latest_version,
reset,
")",
]
message(*tokens_to_show, sep="")
def show_header(
target,
groups_and_projects_provider,
non_empty_configs_provider,
):
if target == "ALL":
info(">>> Getting ALL groups and projects...")
elif target == "ALL_DEFINED":
info(">>> Getting ALL_DEFINED groups and projects...")
else:
info(">>> Getting requested groups/projects...")
groups, projects = groups_and_projects_provider.get_groups_and_projects(target)
if len(groups.get_effective()) == 0 and len(projects.get_effective()) == 0:
if target == "ALL":
error_message = "GitLab has no projects and groups!"
elif target == "ALL_DEFINED":
error_message = (
"Configuration does not have any groups or projects defined!"
)
else:
error_message = f"Project or group {target} cannot be found in GitLab!"
fatal(
error_message,
exit_code=EXIT_INVALID_INPUT,
)
(
groups,
projects,
) = non_empty_configs_provider.omit_groups_and_projects_with_empty_configs(
groups, projects
)
show_input_entities(groups)
show_input_entities(projects)
return projects.get_effective(), groups.get_effective()
def show_input_entities(entities: Entities):
info_1(f"# of {entities.name} to process: {len(entities.get_effective())}")
entities_omitted = ""
entities_verbose = f"{entities.name}: {entities.get_effective()}"
if entities.any_omitted():
entities_omitted += f"(# of omitted {entities.name} -"
first = True
for reason in entities.omitted:
if len(entities.omitted[reason]) > 0:
if not first:
entities_omitted += ","
entities_omitted += f" {reason}: {len(entities.omitted[reason])}"
entities_verbose += f"\nomitted {entities.name} - {reason}: {entities.get_omitted(reason)}"
first = False
entities_omitted += ")"
if entities_omitted:
info_1(entities_omitted)
verbose(entities_verbose)
def show_summary(
groups_with_non_empty_configs: list,
projects_with_non_empty_configs: list,
successful_groups: int,
successful_projects: int,
failed_groups: dict,
failed_projects: dict,
):
if (
len(groups_with_non_empty_configs) > 0
or len(projects_with_non_empty_configs) > 0
):
info_1(f"# of groups processed successfully: {successful_groups}")
info_1(f"# of projects processed successfully: {successful_projects}")
if len(failed_groups) > 0:
info_1(red, f"# of groups failed: {len(failed_groups)}", reset)
for group_number in failed_groups.keys():
info_1(
red,
f"Failed group {group_number}: {failed_groups[group_number]}",
reset,
)
if len(failed_projects) > 0:
info_1(
red,
f"# of projects failed: {len(failed_projects)}",
reset,
)
for project_number in failed_projects.keys():
info_1(
red,
f"Failed project {project_number}: {failed_projects[project_number]}",
reset,
)
if len(failed_groups) > 0 or len(failed_projects) > 0:
sys.exit(EXIT_PROCESSING_ERROR)
elif successful_groups > 0 or successful_projects > 0:
shine = Symbol("✨", "!!!")
info_1(
green,
f"All requested groups/projects processed successfully!",
reset,
shine,
)
else:
info_1(
yellow,
"Nothing to do.",
reset,
)
def info_group_count(prefix, i: int, n: int, *rest: Token, **kwargs: Any) -> None:
info_count(purple, prefix, i, n, *rest, **kwargs)
def info_project_count(prefix, i: int, n: int, *rest: Token, **kwargs: Any) -> None:
info_count(green, prefix, i, n, *rest, **kwargs)
def info_count(color, prefix, i: int, n: int, *rest: Token, **kwargs: Any) -> None:
num_digits = len(str(n))
counter_format = "(%{}d/%d)".format(num_digits)
counter_str = counter_format % (i, n)
info(color, prefix, reset, counter_str, reset, *rest, **kwargs)
| mit | eab37b3bf584c029580c1411e19f73e0 | 27.727273 | 107 | 0.544146 | 3.930348 | false | false | false | false |
python-cmd2/cmd2 | cmd2/argparse_custom.py | 1 | 58853 | # coding=utf-8
"""
This module adds capabilities to argparse by patching a few of its functions.
It also defines a parser class called Cmd2ArgumentParser which improves error
and help output over normal argparse. All cmd2 code uses this parser and it is
recommended that developers of cmd2-based apps either use it or write their own
parser that inherits from it. This will give a consistent look-and-feel between
the help/error output of built-in cmd2 commands and the app-specific commands.
If you wish to override the parser used by cmd2's built-in commands, see
override_parser.py example.
Since the new capabilities are added by patching at the argparse API level,
they are available whether or not Cmd2ArgumentParser is used. However, the help
and error output of Cmd2ArgumentParser is customized to notate nargs ranges
whereas any other parser class won't be as explicit in their output.
**Added capabilities**
Extends argparse nargs functionality by allowing tuples which specify a range
(min, max). To specify a max value with no upper bound, use a 1-item tuple
(min,)
Example::
# -f argument expects at least 3 values
parser.add_argument('-f', nargs=(3,))
# -f argument expects 3 to 5 values
parser.add_argument('-f', nargs=(3, 5))
**Tab Completion**
cmd2 uses its ArgparseCompleter class to enable argparse-based tab completion
on all commands that use the @with_argparse wrappers. Out of the box you get
tab completion of commands, subcommands, and flag names, as well as instructive
hints about the current argument that print when tab is pressed. In addition,
you can add tab completion for each argument's values using parameters passed
to add_argument().
Below are the 3 add_argument() parameters for enabling tab completion of an
argument's value. Only one can be used at a time.
``choices`` - pass a list of values to the choices parameter.
Example::
my_list = ['An Option', 'SomeOtherOption']
parser.add_argument('-o', '--options', choices=my_list)
``choices_provider`` - pass a function that returns choices. This is good in
cases where the choice list is dynamically generated when the user hits tab.
Example::
def my_choices_provider(self):
...
return my_generated_list
parser.add_argument("arg", choices_provider=my_choices_provider)
``completer`` - pass a tab completion function that does custom completion.
cmd2 provides a few completer methods for convenience (e.g., path_complete,
delimiter_complete)
Example::
# This adds file-path completion to an argument
parser.add_argument('-o', '--options', completer=cmd2.Cmd.path_complete)
You can use functools.partial() to prepopulate values of the underlying
choices and completer functions/methods.
Example::
# This says to call path_complete with a preset value for its path_filter argument
dir_completer = functools.partial(path_complete,
path_filter=lambda path: os.path.isdir(path))
parser.add_argument('-o', '--options', completer=dir_completer)
For ``choices_provider`` and ``completer``, do not set them to a bound method. This
is because ArgparseCompleter passes the `self` argument explicitly to these
functions. When ArgparseCompleter calls one, it will detect whether it is bound
to a `Cmd` subclass or `CommandSet`. If bound to a `cmd2.Cmd subclass`, it will
pass the app instance as the `self` argument. If bound to a `cmd2.CommandSet`
subclass, it will pass the `CommandSet` instance as the `self` argument.
Therefore instead of passing something like `self.path_complete`, pass
`cmd2.Cmd.path_complete`.
``choices_provider`` and ``completer`` functions can also be implemented as
standalone functions (i.e. not a member of a class). In this case,
ArgparseCompleter will pass its ``cmd2.Cmd`` app instance as the first
positional argument.
Of the 3 tab completion parameters, ``choices`` is the only one where argparse
validates user input against items in the choices list. This is because the
other 2 parameters are meant to tab complete data sets that are viewed as
dynamic. Therefore it is up to the developer to validate if the user has typed
an acceptable value for these arguments.
There are times when what's being tab completed is determined by a previous
argument on the command line. In these cases, ArgparseCompleter can pass
dictionary that maps the command line tokens up through the one being completed
to their argparse argument name. To receive this dictionary, your
choices/completer function should have an argument called arg_tokens.
Example::
def my_choices_provider(self, arg_tokens)
def my_completer(self, text, line, begidx, endidx, arg_tokens)
All values of the arg_tokens dictionary are lists, even if a particular
argument expects only 1 token. Since ArgparseCompleter is for tab completion,
it does not convert the tokens to their actual argument types or validate their
values. All tokens are stored in the dictionary as the raw strings provided on
the command line. It is up to the developer to determine if the user entered
the correct argument type (e.g. int) and validate their values.
CompletionItem Class - This class was added to help in cases where
uninformative data is being tab completed. For instance, tab completing ID
numbers isn't very helpful to a user without context. Returning a list of
CompletionItems instead of a regular string for completion results will signal
the ArgparseCompleter to output the completion results in a table of completion
tokens with descriptions instead of just a table of tokens::
Instead of this:
1 2 3
The user sees this:
ITEM_ID Item Name
============================
1 My item
2 Another item
3 Yet another item
The left-most column is the actual value being tab completed and its header is
that value's name. The right column header is defined using the
descriptive_header parameter of add_argument(). The right column values come
from the CompletionItem.description value.
Example::
token = 1
token_description = "My Item"
completion_item = CompletionItem(token, token_description)
Since descriptive_header and CompletionItem.description are just strings, you
can format them in such a way to have multiple columns::
ITEM_ID Item Name Checked Out Due Date
==========================================================
1 My item True 02/02/2022
2 Another item False
3 Yet another item False
To use CompletionItems, just return them from your choices_provider or
completer functions. They can also be used as argparse choices. When a
CompletionItem is created, it stores the original value (e.g. ID number) and
makes it accessible through a property called orig_value. cmd2 has patched
argparse so that when evaluating choices, input is compared to
CompletionItem.orig_value instead of the CompletionItem instance.
To avoid printing a ton of information to the screen at once when a user
presses tab, there is a maximum threshold for the number of CompletionItems
that will be shown. Its value is defined in cmd2.Cmd.max_completion_items. It
defaults to 50, but can be changed. If the number of completion suggestions
exceeds this number, they will be displayed in the typical columnized format
and will not include the description value of the CompletionItems.
**Patched argparse functions**
``argparse._ActionsContainer.add_argument`` - adds arguments related to tab
completion and enables nargs range parsing. See _add_argument_wrapper for
more details on these arguments.
``argparse.ArgumentParser._check_value`` - adds support for using
``CompletionItems`` as argparse choices. When evaluating choices, input is
compared to ``CompletionItem.orig_value`` instead of the ``CompletionItem``
instance.
See _ArgumentParser_check_value for more details.
``argparse.ArgumentParser._get_nargs_pattern`` - adds support for nargs ranges.
See _get_nargs_pattern_wrapper for more details.
``argparse.ArgumentParser._match_argument`` - adds support for nargs ranges.
See _match_argument_wrapper for more details.
``argparse._SubParsersAction.remove_parser`` - new function which removes a
sub-parser from a sub-parsers group. See _SubParsersAction_remove_parser for
more details.
**Added accessor methods**
cmd2 has patched ``argparse.Action`` to include the following accessor methods
for cases in which you need to manually access the cmd2-specific attributes.
- ``argparse.Action.get_choices_callable()`` - See
:func:`_action_get_choices_callable` for more details.
- ``argparse.Action.set_choices_provider()`` - See
:func:`_action_set_choices_provider` for more details.
- ``argparse.Action.set_completer()`` - See
:func:`_action_set_completer` for more details.
- ``argparse.Action.get_descriptive_header()`` - See
:func:`_action_get_descriptive_header` for more details.
- ``argparse.Action.set_descriptive_header()`` - See
:func:`_action_set_descriptive_header` for more details.
- ``argparse.Action.get_nargs_range()`` - See
:func:`_action_get_nargs_range` for more details.
- ``argparse.Action.set_nargs_range()`` - See
:func:`_action_set_nargs_range` for more details.
- ``argparse.Action.get_suppress_tab_hint()`` - See
:func:`_action_get_suppress_tab_hint` for more details.
- ``argparse.Action.set_suppress_tab_hint()`` - See
:func:`_action_set_suppress_tab_hint` for more details.
cmd2 has patched ``argparse.ArgumentParser`` to include the following accessor methods
- ``argparse.ArgumentParser.get_ap_completer_type()`` - See
:func:`_ArgumentParser_get_ap_completer_type` for more details.
- ``argparse.Action.set_ap_completer_type()`` - See
:func:`_ArgumentParser_set_ap_completer_type` for more details.
**Subcommand removal**
cmd2 has patched ``argparse._SubParsersAction`` to include a ``remove_parser()``
method which can be used to remove a subcommand.
``argparse._SubParsersAction.remove_parser`` - new function which removes a
sub-parser from a sub-parsers group. See
:func:`_SubParsersAction_remove_parser` for more details.
"""
import argparse
import re
import sys
# noinspection PyUnresolvedReferences,PyProtectedMember
from argparse import (
ONE_OR_MORE,
ZERO_OR_MORE,
ArgumentError,
)
from gettext import (
gettext,
)
from typing import (
IO,
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
NoReturn,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
from . import (
ansi,
constants,
)
try:
from typing import (
Protocol,
runtime_checkable,
)
except ImportError:
from typing_extensions import ( # type: ignore[misc]
Protocol,
runtime_checkable,
)
if TYPE_CHECKING: # pragma: no cover
from .argparse_completer import (
ArgparseCompleter,
)
def generate_range_error(range_min: int, range_max: Union[int, float]) -> str:
    """Build the error message shown when the number of arguments provided falls outside an nargs range.

    :param range_min: lowest number of arguments allowed
    :param range_max: highest number of arguments allowed; may be constants.INFINITY for no upper bound
    :return: formatted message such as "expected 1 to 3 arguments"
    """
    if range_max == constants.INFINITY:
        amount = f"at least {range_min}"
        plural = 's' if range_min != 1 else ''
    else:
        plural = 's' if range_max != 1 else ''
        amount = f"{range_min}" if range_min == range_max else f"{range_min} to {range_max}"
    return f"expected {amount} argument{plural}"
class CompletionItem(str):
    """
    A string that also carries descriptive text shown during tab completion.

    See header of this file for more information
    """

    def __new__(cls, value: object, *args: Any, **kwargs: Any) -> 'CompletionItem':
        # Only the displayed value participates in str construction
        return str.__new__(cls, value)

    # noinspection PyUnusedLocal
    def __init__(self, value: object, description: str = '', *args: Any) -> None:
        """
        CompletionItem Initializer

        :param value: the value being tab completed
        :param description: description text to display alongside the value
        :param args: extra positional args forwarded to str.__init__
        """
        super().__init__(*args)
        # Keep the original (possibly non-string) value so cmd2's patched argparse
        # can compare user input against it instead of the CompletionItem instance.
        self._orig_value = value
        self.description = description

    @property
    def orig_value(self) -> Any:
        """The unconverted value this CompletionItem was created from (read-only)."""
        return self._orig_value
############################################################################################################
# Class and functions related to ChoicesCallable
############################################################################################################
@runtime_checkable
class ChoicesProviderFuncBase(Protocol):
    """Protocol for a zero-argument callable that supplies a list of tab-completion choices."""

    def __call__(self) -> List[str]: ...  # pragma: no cover


@runtime_checkable
class ChoicesProviderFuncWithTokens(Protocol):
    """
    Protocol for a choices-supplying callable that also accepts a dictionary of
    previously seen argument tokens.
    """

    def __call__(self, *, arg_tokens: Dict[str, List[str]] = {}) -> List[str]: ...  # pragma: no cover


# Either flavor of choices-provider callable
ChoicesProviderFunc = Union[ChoicesProviderFuncBase, ChoicesProviderFuncWithTokens]
@runtime_checkable
class CompleterFuncBase(Protocol):
    """Protocol for a tab-completion callable that receives the readline-style prompt state."""

    def __call__(self, text: str, line: str, begidx: int, endidx: int) -> List[str]: ...  # pragma: no cover


@runtime_checkable
class CompleterFuncWithTokens(Protocol):
    """
    Protocol for a tab-completion callable that receives the prompt state plus a
    dictionary of previously seen argument tokens.
    """

    def __call__(
        self, text: str, line: str, begidx: int, endidx: int, *, arg_tokens: Dict[str, List[str]] = {}
    ) -> List[str]: ...  # pragma: no cover


# Either flavor of completer callable
CompleterFunc = Union[CompleterFuncBase, CompleterFuncWithTokens]
class ChoicesCallable:
    """
    Wraps a callable that produces choices for an argparse argument.

    argparse's built-in ``choices`` attribute only accepts an iterable; this class
    lets a function (choices provider or tab completer) fill that role instead.
    """

    def __init__(
        self,
        is_completer: bool,
        to_call: Union[CompleterFunc, ChoicesProviderFunc],
    ) -> None:
        """
        Initializer

        :param is_completer: True if to_call is a tab completion routine which expects
                             the args: text, line, begidx, endidx
        :param to_call: the callable object that will be called to provide choices for the argument
        :raises: ValueError if to_call does not match the type implied by is_completer
        """
        self.is_completer = is_completer
        # runtime checking of Protocols does not inspect a function's parameters,
        # so these isinstance checks are best-effort only.
        if is_completer and not isinstance(to_call, (CompleterFuncBase, CompleterFuncWithTokens)):  # pragma: no cover
            raise ValueError('With is_completer set to true, to_call must be either CompleterFunc, CompleterFuncWithTokens')
        if not is_completer and not isinstance(
            to_call, (ChoicesProviderFuncBase, ChoicesProviderFuncWithTokens)
        ):  # pragma: no cover
            raise ValueError(
                'With is_completer set to false, to_call must be either: '
                'ChoicesProviderFuncBase, ChoicesProviderFuncWithTokens'
            )
        self.to_call = to_call

    @property
    def completer(self) -> CompleterFunc:
        """The wrapped callable, validated as a completer."""
        if not isinstance(self.to_call, (CompleterFuncBase, CompleterFuncWithTokens)):  # pragma: no cover
            # this should've been caught in the constructor, just a backup check
            raise ValueError('Function is not a CompleterFunc')
        return self.to_call

    @property
    def choices_provider(self) -> ChoicesProviderFunc:
        """The wrapped callable, validated as a choices provider."""
        if not isinstance(self.to_call, (ChoicesProviderFuncBase, ChoicesProviderFuncWithTokens)):  # pragma: no cover
            # this should've been caught in the constructor, just a backup check
            raise ValueError('Function is not a ChoicesProviderFunc')
        return self.to_call
############################################################################################################
# The following are names of custom argparse Action attributes added by cmd2.
# Each is the attribute-name string used with getattr()/setattr() on an Action instance.
############################################################################################################
# ChoicesCallable object that specifies the function to be called which provides choices to the argument
ATTR_CHOICES_CALLABLE = 'choices_callable'
# Descriptive header that prints when using CompletionItems
ATTR_DESCRIPTIVE_HEADER = 'descriptive_header'
# A tuple specifying nargs as a range (min, max); max may be constants.INFINITY
ATTR_NARGS_RANGE = 'nargs_range'
# Pressing tab normally displays the help text for the argument if no choices are available
# Setting this attribute to True will suppress these hints
ATTR_SUPPRESS_TAB_HINT = 'suppress_tab_hint'
############################################################################################################
# Patch argparse.Action with accessors for choice_callable attribute
############################################################################################################
def _action_get_choices_callable(self: argparse.Action) -> Optional[ChoicesCallable]:
    """
    Retrieve the ChoicesCallable attached to an argparse Action, if any.

    cmd2 installs this on the ``argparse.Action`` class as ``get_choices_callable()``.

    To call: ``action.get_choices_callable()``

    :param self: argparse Action being queried
    :return: the stored ChoicesCallable instance, or None when the attribute is absent
    """
    return cast(Optional[ChoicesCallable], getattr(self, ATTR_CHOICES_CALLABLE, None))


setattr(argparse.Action, 'get_choices_callable', _action_get_choices_callable)


def _action_set_choices_callable(self: argparse.Action, choices_callable: ChoicesCallable) -> None:
    """
    Attach a ChoicesCallable to an argparse Action.

    cmd2 installs this on the ``argparse.Action`` class as ``_set_choices_callable()``.
    Prefer the convenience wrappers ``set_choices_provider()`` and ``set_completer()``.

    :param self: action being edited
    :param choices_callable: the ChoicesCallable instance to use
    :raises: TypeError if used on incompatible action type
    """
    # A callable cannot coexist with a static choices list
    if self.choices is not None:
        raise TypeError(
            "None of the following parameters can be used alongside a choices parameter:\n" "choices_provider, completer"
        )
    # An action that consumes zero arguments has nothing to complete
    if self.nargs == 0:
        raise TypeError(
            "None of the following parameters can be used on an action that takes no arguments:\n"
            "choices_provider, completer"
        )
    setattr(self, ATTR_CHOICES_CALLABLE, choices_callable)


setattr(argparse.Action, '_set_choices_callable', _action_set_choices_callable)
def _action_set_choices_provider(
    self: argparse.Action,
    choices_provider: ChoicesProviderFunc,
) -> None:
    """
    Set choices_provider of an argparse Action.

    This function is added by cmd2 as a method called ``set_choices_provider()`` to ``argparse.Action`` class.

    To call: ``action.set_choices_provider(choices_provider)``

    :param self: action being edited
    :param choices_provider: the choices_provider instance to use
    :raises: TypeError if used on incompatible action type (choices already set or nargs == 0)
    """
    self._set_choices_callable(ChoicesCallable(is_completer=False, to_call=choices_provider))  # type: ignore[attr-defined]


setattr(argparse.Action, 'set_choices_provider', _action_set_choices_provider)


def _action_set_completer(
    self: argparse.Action,
    completer: CompleterFunc,
) -> None:
    """
    Set completer of an argparse Action.

    This function is added by cmd2 as a method called ``set_completer()`` to ``argparse.Action`` class.

    To call: ``action.set_completer(completer)``

    :param self: action being edited
    :param completer: the completer instance to use
    :raises: TypeError if used on incompatible action type (choices already set or nargs == 0)
    """
    self._set_choices_callable(ChoicesCallable(is_completer=True, to_call=completer))  # type: ignore[attr-defined]


setattr(argparse.Action, 'set_completer', _action_set_completer)
############################################################################################################
# Patch argparse.Action with accessors for descriptive_header attribute
############################################################################################################
def _action_get_descriptive_header(self: argparse.Action) -> Optional[str]:
    """
    Retrieve the descriptive_header attribute of an argparse Action.

    cmd2 installs this on the ``argparse.Action`` class as ``get_descriptive_header()``.

    To call: ``action.get_descriptive_header()``

    :param self: argparse Action being queried
    :return: the stored header text, or None when the attribute is absent
    """
    header = getattr(self, ATTR_DESCRIPTIVE_HEADER, None)
    return cast(Optional[str], header)


setattr(argparse.Action, 'get_descriptive_header', _action_get_descriptive_header)


def _action_set_descriptive_header(self: argparse.Action, descriptive_header: Optional[str]) -> None:
    """
    Store the descriptive_header attribute on an argparse Action.

    cmd2 installs this on the ``argparse.Action`` class as ``set_descriptive_header()``.

    To call: ``action.set_descriptive_header(descriptive_header)``

    :param self: argparse Action being updated
    :param descriptive_header: value being assigned
    """
    setattr(self, ATTR_DESCRIPTIVE_HEADER, descriptive_header)


setattr(argparse.Action, 'set_descriptive_header', _action_set_descriptive_header)
############################################################################################################
# Patch argparse.Action with accessors for nargs_range attribute
############################################################################################################
def _action_get_nargs_range(self: argparse.Action) -> Optional[Tuple[int, Union[int, float]]]:
    """
    Retrieve the nargs_range attribute of an argparse Action.

    cmd2 installs this on the ``argparse.Action`` class as ``get_nargs_range()``.

    To call: ``action.get_nargs_range()``

    :param self: argparse Action being queried
    :return: the (min, max) tuple, or None when the attribute is absent
    """
    nargs_range = getattr(self, ATTR_NARGS_RANGE, None)
    return cast(Optional[Tuple[int, Union[int, float]]], nargs_range)


setattr(argparse.Action, 'get_nargs_range', _action_get_nargs_range)


def _action_set_nargs_range(self: argparse.Action, nargs_range: Optional[Tuple[int, Union[int, float]]]) -> None:
    """
    Store the nargs_range attribute on an argparse Action.

    cmd2 installs this on the ``argparse.Action`` class as ``set_nargs_range()``.

    To call: ``action.set_nargs_range(nargs_range)``

    :param self: argparse Action being updated
    :param nargs_range: value being assigned; max may be constants.INFINITY
    """
    setattr(self, ATTR_NARGS_RANGE, nargs_range)


setattr(argparse.Action, 'set_nargs_range', _action_set_nargs_range)
############################################################################################################
# Patch argparse.Action with accessors for suppress_tab_hint attribute
############################################################################################################
def _action_get_suppress_tab_hint(self: argparse.Action) -> bool:
    """
    Retrieve the suppress_tab_hint attribute of an argparse Action.

    cmd2 installs this on the ``argparse.Action`` class as ``get_suppress_tab_hint()``.

    To call: ``action.get_suppress_tab_hint()``

    :param self: argparse Action being queried
    :return: the stored flag, or False when the attribute is absent
    """
    return cast(bool, getattr(self, ATTR_SUPPRESS_TAB_HINT, False))


setattr(argparse.Action, 'get_suppress_tab_hint', _action_get_suppress_tab_hint)


def _action_set_suppress_tab_hint(self: argparse.Action, suppress_tab_hint: bool) -> None:
    """
    Store the suppress_tab_hint attribute on an argparse Action.

    cmd2 installs this on the ``argparse.Action`` class as ``set_suppress_tab_hint()``.

    To call: ``action.set_suppress_tab_hint(suppress_tab_hint)``

    :param self: argparse Action being updated
    :param suppress_tab_hint: value being assigned
    """
    setattr(self, ATTR_SUPPRESS_TAB_HINT, suppress_tab_hint)


setattr(argparse.Action, 'set_suppress_tab_hint', _action_set_suppress_tab_hint)
############################################################################################################
# Allow developers to add custom action attributes
############################################################################################################
# Names of all registered custom parameters
CUSTOM_ACTION_ATTRIBS: Set[str] = set()
# Prefix for the attribute that backs a custom parameter on an Action instance
_CUSTOM_ATTRIB_PFX = '_attr_'


def register_argparse_argument_parameter(param_name: str, param_type: Optional[Type[Any]]) -> None:
    """
    Registers a custom argparse argument parameter.

    The registered name will then be a recognized keyword parameter to the parser's `add_argument()` function.

    Accessor functions will be added to the parameter's Action object in the form of: ``get_{param_name}()``
    and ``set_{param_name}(value)``.

    :param param_name: Name of the parameter to add.
    :param param_type: Type of the parameter to add. When provided, the setter enforces it with isinstance.
    :raises: KeyError if the parameter is already registered or is not a valid Python identifier
    """
    attr_name = f'{_CUSTOM_ATTRIB_PFX}{param_name}'
    if param_name in CUSTOM_ACTION_ATTRIBS or hasattr(argparse.Action, attr_name):
        raise KeyError(f'Custom parameter {param_name} already exists')
    if not re.search('^[A-Za-z_][A-Za-z0-9_]*$', param_name):
        raise KeyError(f'Invalid parameter name {param_name} - cannot be used as a python identifier')

    getter_name = f'get_{param_name}'

    # Getter patched onto argparse.Action as get_{param_name}(); returns None if the
    # attribute was never set. (A plain docstring can't interpolate param_name, and an
    # f-string in docstring position is just an expression that never becomes __doc__,
    # so the behavior is documented here instead.)
    def _action_get_custom_parameter(self: argparse.Action) -> Any:
        return getattr(self, attr_name, None)

    setattr(argparse.Action, getter_name, _action_get_custom_parameter)

    setter_name = f'set_{param_name}'

    # Setter patched onto argparse.Action as set_{param_name}(value); enforces
    # param_type when one was registered.
    def _action_set_custom_parameter(self: argparse.Action, value: Any) -> None:
        if param_type and not isinstance(value, param_type):
            raise TypeError(f'{param_name} must be of type {param_type}, got: {value} ({type(value)})')
        setattr(self, attr_name, value)

    setattr(argparse.Action, setter_name, _action_set_custom_parameter)

    CUSTOM_ACTION_ATTRIBS.add(param_name)
############################################################################################################
# Patch _ActionsContainer.add_argument with our wrapper to support more arguments
############################################################################################################
# Save original _ActionsContainer.add_argument so we can call it in our wrapper
# noinspection PyProtectedMember
orig_actions_container_add_argument = argparse._ActionsContainer.add_argument


# noinspection PyProtectedMember
def _add_argument_wrapper(
    self: argparse._ActionsContainer,
    *args: Any,
    nargs: Union[int, str, Tuple[int], Tuple[int, int], Tuple[int, float], None] = None,
    choices_provider: Optional[ChoicesProviderFunc] = None,
    completer: Optional[CompleterFunc] = None,
    suppress_tab_hint: bool = False,
    descriptive_header: Optional[str] = None,
    **kwargs: Any,
) -> argparse.Action:
    """
    Wrapper around _ActionsContainer.add_argument() which supports more settings used by cmd2

    # Args from original function
    :param self: instance of the _ActionsContainer being added to
    :param args: arguments expected by argparse._ActionsContainer.add_argument

    # Customized arguments from original function
    :param nargs: extends argparse nargs functionality by allowing tuples which specify a range (min, max)
                  to specify a max value with no upper bound, use a 1-item tuple (min,)

    # Added args used by ArgparseCompleter
    :param choices_provider: function that provides choices for this argument
    :param completer: tab completion function that provides choices for this argument
    :param suppress_tab_hint: when ArgparseCompleter has no results to show during tab completion, it displays the
                              current argument's help text as a hint. Set this to True to suppress the hint. If this
                              argument's help text is set to argparse.SUPPRESS, then tab hints will not display
                              regardless of the value passed for suppress_tab_hint. Defaults to False.
    :param descriptive_header: if the provided choices are CompletionItems, then this header will display
                               during tab completion. Defaults to None.

    # Args from original function
    :param kwargs: keyword-arguments recognized by argparse._ActionsContainer.add_argument

    Note: You can only use 1 of the following in your argument:
          choices, choices_provider, completer

          See the header of this file for more information

    :return: the created argument action
    :raises: ValueError on incorrect parameter usage
    """
    # Verify consistent use of arguments
    choices_callables = [choices_provider, completer]
    num_params_set = len(choices_callables) - choices_callables.count(None)
    if num_params_set > 1:
        err_msg = "Only one of the following parameters may be used at a time:\n" "choices_provider, completer"
        raise (ValueError(err_msg))
    # Pre-process special ranged nargs
    nargs_range = None
    if nargs is not None:
        nargs_adjusted: Union[int, str, Tuple[int], Tuple[int, int], Tuple[int, float], None]
        # Check if nargs was given as a range
        if isinstance(nargs, tuple):
            # Handle 1-item tuple by setting max to INFINITY
            if len(nargs) == 1:
                nargs = (nargs[0], constants.INFINITY)
            # Validate nargs tuple
            if (
                len(nargs) != 2
                or not isinstance(nargs[0], int)  # type: ignore[unreachable]
                or not (isinstance(nargs[1], int) or nargs[1] == constants.INFINITY)  # type: ignore[misc]
            ):
                raise ValueError('Ranged values for nargs must be a tuple of 1 or 2 integers')
            if nargs[0] >= nargs[1]:  # type: ignore[misc]
                raise ValueError('Invalid nargs range. The first value must be less than the second')
            if nargs[0] < 0:
                raise ValueError('Negative numbers are invalid for nargs range')
            # Save the nargs tuple as our range setting
            nargs_range = nargs
            range_min = nargs_range[0]
            range_max = nargs_range[1]  # type: ignore[misc]
            # Convert nargs into a format argparse recognizes; nargs_range is cleared
            # when a built-in argparse nargs value expresses the range exactly, since
            # our custom matcher is only needed for ranges argparse can't represent.
            if range_min == 0:
                if range_max == 1:
                    nargs_adjusted = argparse.OPTIONAL
                    # No range needed since (0, 1) is just argparse.OPTIONAL
                    nargs_range = None
                else:
                    nargs_adjusted = argparse.ZERO_OR_MORE
                    if range_max == constants.INFINITY:
                        # No range needed since (0, INFINITY) is just argparse.ZERO_OR_MORE
                        nargs_range = None
            elif range_min == 1 and range_max == constants.INFINITY:
                nargs_adjusted = argparse.ONE_OR_MORE
                # No range needed since (1, INFINITY) is just argparse.ONE_OR_MORE
                nargs_range = None
            else:
                # Range argparse can't express directly; keep nargs_range so the patched
                # _get_nargs_pattern/_match_argument enforce it
                nargs_adjusted = argparse.ONE_OR_MORE
        else:
            nargs_adjusted = nargs
        # Add the argparse-recognized version of nargs to kwargs
        kwargs['nargs'] = nargs_adjusted
    # Extract registered custom keyword arguments so the original add_argument
    # (which doesn't know about them) won't reject them
    custom_attribs: Dict[str, Any] = {}
    for keyword, value in kwargs.items():
        if keyword in CUSTOM_ACTION_ATTRIBS:
            custom_attribs[keyword] = value
    for keyword in custom_attribs:
        del kwargs[keyword]
    # Create the argument using the original add_argument function
    new_arg = orig_actions_container_add_argument(self, *args, **kwargs)
    # Set the custom attributes
    new_arg.set_nargs_range(nargs_range)  # type: ignore[arg-type, attr-defined]
    if choices_provider:
        new_arg.set_choices_provider(choices_provider)  # type: ignore[attr-defined]
    elif completer:
        new_arg.set_completer(completer)  # type: ignore[attr-defined]
    new_arg.set_suppress_tab_hint(suppress_tab_hint)  # type: ignore[attr-defined]
    new_arg.set_descriptive_header(descriptive_header)  # type: ignore[attr-defined]
    for keyword, value in custom_attribs.items():
        attr_setter = getattr(new_arg, f'set_{keyword}', None)
        if attr_setter is not None:
            attr_setter(value)
    return new_arg


# Overwrite _ActionsContainer.add_argument with our wrapper
# noinspection PyProtectedMember
setattr(argparse._ActionsContainer, 'add_argument', _add_argument_wrapper)
############################################################################################################
# Patch ArgumentParser._get_nargs_pattern with our wrapper to support nargs ranges
############################################################################################################
# Save original ArgumentParser._get_nargs_pattern so we can call it in our wrapper
# noinspection PyProtectedMember
orig_argument_parser_get_nargs_pattern = argparse.ArgumentParser._get_nargs_pattern


# noinspection PyProtectedMember
def _get_nargs_pattern_wrapper(self: argparse.ArgumentParser, action: argparse.Action) -> str:
    """Wrapper around ArgumentParser._get_nargs_pattern behavior to support nargs ranges."""
    nargs_range = action.get_nargs_range()  # type: ignore[attr-defined]
    if nargs_range is None:
        # Not a ranged-nargs action; defer to stock argparse behavior
        return orig_argument_parser_get_nargs_pattern(self, action)

    # An INFINITY max maps to an open-ended regex repetition
    range_max = '' if nargs_range[1] == constants.INFINITY else nargs_range[1]  # type: ignore[assignment]
    nargs_pattern = f'(-*A{{{nargs_range[0]},{range_max}}}-*)'

    # if this is an optional action, -- is not allowed
    if action.option_strings:
        nargs_pattern = nargs_pattern.replace('-*', '').replace('-', '')
    return nargs_pattern


# Overwrite ArgumentParser._get_nargs_pattern with our wrapper
# noinspection PyProtectedMember
setattr(argparse.ArgumentParser, '_get_nargs_pattern', _get_nargs_pattern_wrapper)
############################################################################################################
# Patch ArgumentParser._match_argument with our wrapper to support nargs ranges
############################################################################################################
# noinspection PyProtectedMember
orig_argument_parser_match_argument = argparse.ArgumentParser._match_argument


# noinspection PyProtectedMember
def _match_argument_wrapper(self: argparse.ArgumentParser, action: argparse.Action, arg_strings_pattern: str) -> int:
    """Wrapper around ArgumentParser._match_argument behavior to support nargs ranges."""
    pattern = self._get_nargs_pattern(action)

    # When a ranged-nargs action fails to match, raise a range-specific error
    # instead of argparse's generic message
    if re.match(pattern, arg_strings_pattern) is None:
        nargs_range = action.get_nargs_range()  # type: ignore[attr-defined]
        if nargs_range is not None:
            raise ArgumentError(action, generate_range_error(nargs_range[0], nargs_range[1]))

    return orig_argument_parser_match_argument(self, action, arg_strings_pattern)


# Overwrite ArgumentParser._match_argument with our wrapper
# noinspection PyProtectedMember
setattr(argparse.ArgumentParser, '_match_argument', _match_argument_wrapper)
############################################################################################################
# Patch argparse.ArgumentParser with accessors for ap_completer_type attribute
############################################################################################################
# An ArgumentParser attribute which specifies a subclass of ArgparseCompleter for custom tab completion behavior on a
# given parser. If this is None or not present, then cmd2 will use argparse_completer.DEFAULT_AP_COMPLETER when tab
# completing a parser's arguments
ATTR_AP_COMPLETER_TYPE = 'ap_completer_type'
# noinspection PyPep8Naming
def _ArgumentParser_get_ap_completer_type(self: argparse.ArgumentParser) -> Optional[Type['ArgparseCompleter']]:
"""
Get the ap_completer_type attribute of an argparse ArgumentParser.
This function is added by cmd2 as a method called ``get_ap_completer_type()`` to ``argparse.ArgumentParser`` class.
To call: ``parser.get_ap_completer_type()``
:param self: ArgumentParser being queried
:return: An ArgparseCompleter-based class or None if attribute does not exist
"""
return cast(Optional[Type['ArgparseCompleter']], getattr(self, ATTR_AP_COMPLETER_TYPE, None))
setattr(argparse.ArgumentParser, 'get_ap_completer_type', _ArgumentParser_get_ap_completer_type)
# noinspection PyPep8Naming
def _ArgumentParser_set_ap_completer_type(self: argparse.ArgumentParser, ap_completer_type: Type['ArgparseCompleter']) -> None:
    """
    Store the ap_completer_type attribute on an argparse ArgumentParser.

    cmd2 installs this on the ``argparse.ArgumentParser`` class as ``set_ap_completer_type()``.

    To call: ``parser.set_ap_completer_type(ap_completer_type)``

    :param self: ArgumentParser being edited
    :param ap_completer_type: the custom ArgparseCompleter-based class to use when tab completing arguments for this parser
    """
    setattr(self, ATTR_AP_COMPLETER_TYPE, ap_completer_type)


setattr(argparse.ArgumentParser, 'set_ap_completer_type', _ArgumentParser_set_ap_completer_type)
############################################################################################################
# Patch ArgumentParser._check_value to support CompletionItems as choices
############################################################################################################
# noinspection PyPep8Naming
def _ArgumentParser_check_value(self: argparse.ArgumentParser, action: argparse.Action, value: Any) -> None:
    """
    Custom override of ArgumentParser._check_value that supports CompletionItems as choices.

    When evaluating choices, input is compared to CompletionItem.orig_value instead of the
    CompletionItem instance.

    :param self: ArgumentParser instance
    :param action: the action being populated
    :param value: value from command line already run through conversion function by argparse
    :raises: ArgumentError if choices are declared and value is not among them
    """
    # converted value must be one of the choices (if specified)
    if action.choices is None:
        return

    # Import gettext like argparse does
    from gettext import (
        gettext as _,
    )

    # If any choice is a CompletionItem, then use its orig_value property.
    choices = [c.orig_value if isinstance(c, CompletionItem) else c for c in action.choices]
    if value not in choices:
        args = {'value': value, 'choices': ', '.join(map(repr, choices))}
        msg = _('invalid choice: %(value)r (choose from %(choices)s)')
        raise ArgumentError(action, msg % args)


setattr(argparse.ArgumentParser, '_check_value', _ArgumentParser_check_value)
############################################################################################################
# Patch argparse._SubParsersAction to add remove_parser function
############################################################################################################
# noinspection PyPep8Naming,PyProtectedMember
def _SubParsersAction_remove_parser(self: argparse._SubParsersAction, name: str) -> None:  # type: ignore
    """
    Removes a sub-parser from a sub-parsers group. Used to remove subcommands from a parser.

    cmd2 installs this on the ``argparse._SubParsersAction`` class as ``remove_parser()``.

    To call: ``action.remove_parser(name)``

    :param self: instance of the _SubParsersAction being edited
    :param name: name of the subcommand for the sub-parser to remove
    """
    # Drop the subcommand's entry from its base command's help text
    for pseudo_action in self._choices_actions:
        if pseudo_action.dest == name:
            self._choices_actions.remove(pseudo_action)
            break

    # Drop the subcommand and every alias that maps to the same sub-parser
    subparser = self._name_parser_map.get(name)
    if subparser is not None:
        stale_names = [registered for registered, parser in self._name_parser_map.items() if parser is subparser]
        for registered in stale_names:
            del self._name_parser_map[registered]


# noinspection PyProtectedMember
setattr(argparse._SubParsersAction, 'remove_parser', _SubParsersAction_remove_parser)
############################################################################################################
# Unless otherwise noted, everything below this point are copied from Python's
# argparse implementation with minor tweaks to adjust output.
# Changes are noted if it's buried in a block of copied code. Otherwise the
# function will check for a special case and fall back to the parent function
############################################################################################################
# noinspection PyCompatibility,PyShadowingBuiltins
class Cmd2HelpFormatter(argparse.RawTextHelpFormatter):
"""Custom help formatter to configure ordering of help text"""
# noinspection PyProtectedMember
    def _format_usage(
        self,
        usage: Optional[str],
        actions: Iterable[argparse.Action],
        groups: Iterable[argparse._ArgumentGroup],
        prefix: Optional[str] = None,
    ) -> str:
        """
        Build the usage line(s), ordering actions as required options, then optionals, then positionals.

        Copied from argparse.HelpFormatter._format_usage; cmd2 customizations are marked inline.

        :param usage: explicit usage string supplied to the parser, if any
        :param usage: explicit usage string supplied to the parser, if any; %(prog)s is substituted
        :param actions: actions to render into the usage string
        :param groups: mutually exclusive groups consulted when formatting action usage
        :param prefix: text placed before the usage (defaults to 'Usage: ')
        :return: formatted usage text
        """
        if prefix is None:
            prefix = gettext('Usage: ')
        # if usage is specified, use that
        if usage is not None:
            usage %= dict(prog=self._prog)
        # if no optionals or positionals are available, usage is just prog
        elif not actions:
            usage = '%(prog)s' % dict(prog=self._prog)
        # if optionals and positionals are available, calculate usage
        else:
            prog = '%(prog)s' % dict(prog=self._prog)
            # split optionals from positionals
            optionals = []
            positionals = []
            # Begin cmd2 customization (separates required and optional, applies to all changes in this function)
            required_options = []
            for action in actions:
                if action.option_strings:
                    if action.required:
                        required_options.append(action)
                    else:
                        optionals.append(action)
                else:
                    positionals.append(action)
            # End cmd2 customization
            # build full usage string
            format = self._format_actions_usage
            action_usage = format(required_options + optionals + positionals, groups)
            usage = ' '.join([s for s in [prog, action_usage] if s])
            # wrap the usage parts if it's too long
            text_width = self._width - self._current_indent
            if len(prefix) + len(usage) > text_width:
                # Begin cmd2 customization
                # break usage into wrappable parts
                part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
                req_usage = format(required_options, groups)
                opt_usage = format(optionals, groups)
                pos_usage = format(positionals, groups)
                req_parts = re.findall(part_regexp, req_usage)
                opt_parts = re.findall(part_regexp, opt_usage)
                pos_parts = re.findall(part_regexp, pos_usage)
                # splitting and rejoining must round-trip or wrapping would corrupt the usage text
                assert ' '.join(req_parts) == req_usage
                assert ' '.join(opt_parts) == opt_usage
                assert ' '.join(pos_parts) == pos_usage
                # End cmd2 customization
                # helper for wrapping lines
                # noinspection PyMissingOrEmptyDocstring,PyShadowingNames
                def get_lines(parts: List[str], indent: str, prefix: Optional[str] = None) -> List[str]:
                    lines: List[str] = []
                    line: List[str] = []
                    if prefix is not None:
                        line_len = len(prefix) - 1
                    else:
                        line_len = len(indent) - 1
                    for part in parts:
                        if line_len + 1 + len(part) > text_width and line:
                            lines.append(indent + ' '.join(line))
                            line = []
                            line_len = len(indent) - 1
                        line.append(part)
                        line_len += len(part) + 1
                    if line:
                        lines.append(indent + ' '.join(line))
                    if prefix is not None:
                        lines[0] = lines[0][len(indent) :]
                    return lines
                # if prog is short, follow it with optionals or positionals
                if len(prefix) + len(prog) <= 0.75 * text_width:
                    indent = ' ' * (len(prefix) + len(prog) + 1)
                    # Begin cmd2 customization
                    if req_parts:
                        lines = get_lines([prog] + req_parts, indent, prefix)
                        lines.extend(get_lines(opt_parts, indent))
                        lines.extend(get_lines(pos_parts, indent))
                    elif opt_parts:
                        lines = get_lines([prog] + opt_parts, indent, prefix)
                        lines.extend(get_lines(pos_parts, indent))
                    elif pos_parts:
                        lines = get_lines([prog] + pos_parts, indent, prefix)
                    else:
                        lines = [prog]
                    # End cmd2 customization
                # if prog is long, put it on its own line
                else:
                    indent = ' ' * len(prefix)
                    # Begin cmd2 customization
                    parts = req_parts + opt_parts + pos_parts
                    lines = get_lines(parts, indent)
                    if len(lines) > 1:
                        lines = []
                        lines.extend(get_lines(req_parts, indent))
                        lines.extend(get_lines(opt_parts, indent))
                        lines.extend(get_lines(pos_parts, indent))
                    # End cmd2 customization
                    lines = [prog] + lines
                # join lines into usage
                usage = '\n'.join(lines)
        # prefix with 'Usage:'
        return '%s%s\n\n' % (prefix, usage)
def _format_action_invocation(self, action: argparse.Action) -> str:
if not action.option_strings:
default = self._get_default_metavar_for_positional(action)
(metavar,) = self._metavar_formatter(action, default)(1)
return metavar
else:
parts: List[str] = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
return ', '.join(parts)
# Begin cmd2 customization (less verbose)
# if the Optional takes a value, format is:
# -s, --long ARGS
else:
default = self._get_default_metavar_for_optional(action)
args_string = self._format_args(action, default)
return ', '.join(action.option_strings) + ' ' + args_string
# End cmd2 customization
# noinspection PyMethodMayBeStatic
def _determine_metavar(
self,
action: argparse.Action,
default_metavar: Union[str, Tuple[str, ...]],
) -> Union[str, Tuple[str, ...]]:
"""Custom method to determine what to use as the metavar value of an action"""
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
# Begin cmd2 customization (added space after comma)
result = '{%s}' % ', '.join(choice_strs)
# End cmd2 customization
else:
result = default_metavar
return result
def _metavar_formatter(
self,
action: argparse.Action,
default_metavar: Union[str, Tuple[str, ...]],
) -> Callable[[int], Tuple[str, ...]]:
metavar = self._determine_metavar(action, default_metavar)
# noinspection PyMissingOrEmptyDocstring
def format(tuple_size: int) -> Tuple[str, ...]:
if isinstance(metavar, tuple):
return metavar
else:
return (metavar,) * tuple_size
return format
    # noinspection PyProtectedMember
    def _format_args(self, action: argparse.Action, default_metavar: Union[str, Tuple[str, ...]]) -> str:
        """Customized to handle ranged nargs and make other output less verbose"""
        metavar = self._determine_metavar(action, default_metavar)
        metavar_formatter = self._metavar_formatter(action, default_metavar)

        # Handle nargs specified as a range (a cmd2 extension set on the action)
        nargs_range = action.get_nargs_range()  # type: ignore[attr-defined]
        if nargs_range is not None:
            if nargs_range[1] == constants.INFINITY:
                # Unbounded upper limit renders as "N+"
                range_str = f'{nargs_range[0]}+'
            else:
                range_str = f'{nargs_range[0]}..{nargs_range[1]}'

            # '%s' % metavar_formatter(1) renders the single metavar string
            return '{}{{{}}}'.format('%s' % metavar_formatter(1), range_str)

        # Make this output less verbose. Do not customize the output when metavar is a
        # tuple of strings. Allow argparse's formatter to handle that instead.
        elif isinstance(metavar, str):
            if action.nargs == ZERO_OR_MORE:
                return '[%s [...]]' % metavar_formatter(1)
            elif action.nargs == ONE_OR_MORE:
                return '%s [...]' % metavar_formatter(1)
            elif isinstance(action.nargs, int) and action.nargs > 1:
                return '{}{{{}}}'.format('%s' % metavar_formatter(1), action.nargs)

        # Everything else (tuple metavars, nargs=1, '?', etc.) falls back to argparse
        return super()._format_args(action, default_metavar)  # type: ignore[arg-type]
# noinspection PyCompatibility
class Cmd2ArgumentParser(argparse.ArgumentParser):
    """Custom ArgumentParser class that improves error and help output"""

    def __init__(
        self,
        prog: Optional[str] = None,
        usage: Optional[str] = None,
        description: Optional[str] = None,
        epilog: Optional[str] = None,
        parents: Sequence[argparse.ArgumentParser] = (),
        formatter_class: Type[argparse.HelpFormatter] = Cmd2HelpFormatter,
        prefix_chars: str = '-',
        fromfile_prefix_chars: Optional[str] = None,
        argument_default: Optional[str] = None,
        conflict_handler: str = 'error',
        add_help: bool = True,
        allow_abbrev: bool = True,
        *,
        ap_completer_type: Optional[Type['ArgparseCompleter']] = None,
    ) -> None:
        """
        # Custom parameter added by cmd2
        :param ap_completer_type: optional parameter which specifies a subclass of ArgparseCompleter for custom tab completion
                                  behavior on this parser. If this is None or not present, then cmd2 will use
                                  argparse_completer.DEFAULT_AP_COMPLETER when tab completing this parser's arguments
        """
        super(Cmd2ArgumentParser, self).__init__(
            prog=prog,
            usage=usage,
            description=description,
            epilog=epilog,
            parents=parents if parents else [],
            formatter_class=formatter_class,  # type: ignore[arg-type]
            prefix_chars=prefix_chars,
            fromfile_prefix_chars=fromfile_prefix_chars,
            argument_default=argument_default,
            conflict_handler=conflict_handler,
            add_help=add_help,
            allow_abbrev=allow_abbrev,
        )

        # Stash the custom completer type on the parser (method added by cmd2 elsewhere)
        self.set_ap_completer_type(ap_completer_type)  # type: ignore[attr-defined]

    # noinspection PyProtectedMember
    def add_subparsers(self, **kwargs: Any) -> argparse._SubParsersAction:  # type: ignore
        """
        Custom override. Sets a default title if one was not given.

        :param kwargs: additional keyword arguments
        :return: argparse Subparser Action
        """
        if 'title' not in kwargs:
            kwargs['title'] = 'subcommands'

        return super().add_subparsers(**kwargs)

    def error(self, message: str) -> NoReturn:
        """Custom override that applies custom formatting to the error message"""
        lines = message.split('\n')
        linum = 0
        formatted_message = ''
        # Prefix the first line with 'Error: '; continuation lines get their own prefix
        for line in lines:
            if linum == 0:
                formatted_message = 'Error: ' + line
            else:
                formatted_message += '\n ' + line
            linum += 1

        self.print_usage(sys.stderr)
        formatted_message = ansi.style_error(formatted_message)
        # Exit code 2 matches argparse's convention for usage errors
        self.exit(2, f'{formatted_message}\n\n')

    # noinspection PyProtectedMember
    def format_help(self) -> str:
        """Copy of format_help() from argparse.ArgumentParser with tweaks to separately display required parameters"""
        formatter = self._get_formatter()

        # usage
        formatter.add_usage(self.usage, self._actions, self._mutually_exclusive_groups)  # type: ignore[arg-type]

        # description
        formatter.add_text(self.description)

        # Begin cmd2 customization (separate required and optional arguments)

        # positionals, optionals and user-defined groups
        for action_group in self._action_groups:
            # argparse renamed its default options group title in Python 3.10
            if sys.version_info >= (3, 10):
                default_options_group = action_group.title == 'options'
            else:
                default_options_group = action_group.title == 'optional arguments'

            if default_options_group:
                # check if the arguments are required, group accordingly
                req_args = []
                opt_args = []
                for action in action_group._group_actions:
                    if action.required:
                        req_args.append(action)
                    else:
                        opt_args.append(action)

                # separately display required arguments
                formatter.start_section('required arguments')
                formatter.add_text(action_group.description)
                formatter.add_arguments(req_args)
                formatter.end_section()

                # now display truly optional arguments
                formatter.start_section('optional arguments')
                formatter.add_text(action_group.description)
                formatter.add_arguments(opt_args)
                formatter.end_section()
            else:
                formatter.start_section(action_group.title)
                formatter.add_text(action_group.description)
                formatter.add_arguments(action_group._group_actions)
                formatter.end_section()

        # End cmd2 customization

        # epilog
        formatter.add_text(self.epilog)

        # determine help from format above
        return formatter.format_help() + '\n'

    def _print_message(self, message: str, file: Optional[IO[str]] = None) -> None:
        # Override _print_message to use style_aware_write() since we use ANSI escape characters to support color
        if message:
            if file is None:
                file = sys.stderr
            ansi.style_aware_write(file, message)
class Cmd2AttributeWrapper:
    """
    Wraps a cmd2-specific attribute added to an argparse Namespace.
    This makes it easy to know which attributes in a Namespace are
    arguments from a parser and which were added by cmd2.
    """

    def __init__(self, attribute: Any) -> None:
        # Name-mangled to discourage direct access; use get()/set().
        self.__attribute = attribute

    def get(self) -> Any:
        """Get the value of the attribute"""
        return self.__attribute

    def set(self, new_val: Any) -> None:
        """Set the value of the attribute"""
        self.__attribute = new_val
# The default ArgumentParser class for a cmd2 app
# (module-level so it can be swapped via set_default_argument_parser_type() below)
DEFAULT_ARGUMENT_PARSER: Type[argparse.ArgumentParser] = Cmd2ArgumentParser
def set_default_argument_parser_type(parser_type: Type[argparse.ArgumentParser]) -> None:
    """
    Set the default ArgumentParser class for a cmd2 app. This must be called prior to loading cmd2.py if
    you want to override the parser for cmd2's built-in commands. See examples/override_parser.py.

    :param parser_type: ArgumentParser subclass to use as the default
    """
    global DEFAULT_ARGUMENT_PARSER
    DEFAULT_ARGUMENT_PARSER = parser_type
| mit | bdfb6c01da9a58f5c5d1882379c0d621 | 39.421016 | 127 | 0.62233 | 4.409125 | false | false | false | false |
python-cmd2/cmd2 | examples/pirate.py | 1 | 3293 | #!/usr/bin/env python
# coding=utf-8
"""
This example is adapted from the pirate8.py example created by Catherine Devlin and
presented as part of her PyCon 2010 talk.
It demonstrates many features of cmd2.
"""
import cmd2
from cmd2 import (
Fg,
)
from cmd2.constants import (
MULTILINE_TERMINATOR,
)
# Lowercase names of all foreground colors, used as choices for the songcolor setting
color_choices = [c.name.lower() for c in Fg]
class Pirate(cmd2.Cmd):
    """A piratical example cmd2 application involving looting and drinking."""

    def __init__(self):
        """Initialize the base class as well as this one"""
        shortcuts = dict(cmd2.DEFAULT_SHORTCUTS)
        shortcuts.update({'~': 'sing'})
        # 'sing' is a multiline command terminated by ';' (MULTILINE_TERMINATOR) or '...'
        super().__init__(multiline_commands=['sing'], terminators=[MULTILINE_TERMINATOR, '...'], shortcuts=shortcuts)
        self.default_to_shell = True
        self.songcolor = 'blue'

        # Make songcolor settable at runtime
        self.add_settable(cmd2.Settable('songcolor', str, 'Color to ``sing``', self, choices=color_choices))

        # prompts and defaults
        self.gold = 0
        self.initial_gold = self.gold
        self.prompt = 'arrr> '

    def precmd(self, line):
        """Runs just before a command line is parsed, but after the prompt is presented."""
        # Snapshot gold so postcmd() can detect changes made by the command
        self.initial_gold = self.gold
        return line

    def postcmd(self, stop, line):
        """Runs right before a command is about to return."""
        if self.gold != self.initial_gold:
            self.poutput('Now we gots {0} doubloons'.format(self.gold))
        if self.gold < 0:
            self.poutput("Off to debtorrr's prison.")
            # Non-zero exit code signals failure; returning True stops the command loop
            self.exit_code = 1
            stop = True
        return stop

    # noinspection PyUnusedLocal
    def do_loot(self, arg):
        """Seize booty from a passing ship."""
        self.gold += 1

    def do_drink(self, arg):
        """Drown your sorrrows in rrrum.

        drink [n] - drink [n] barrel[s] o' rum."""
        try:
            self.gold -= int(arg)
        except ValueError:
            if arg:
                self.poutput('''What's "{0}"? I'll take rrrum.'''.format(arg))
            # Empty or non-numeric argument: drink a single barrel
            self.gold -= 1

    def do_quit(self, arg):
        """Quit the application gracefully."""
        self.poutput("Quiterrr!")
        return True

    def do_sing(self, arg):
        """Sing a colorful song."""
        self.poutput(cmd2.ansi.style(arg, fg=Fg[self.songcolor.upper()]))

    yo_parser = cmd2.Cmd2ArgumentParser()
    yo_parser.add_argument('--ho', type=int, default=2, help="How often to chant 'ho'")
    yo_parser.add_argument('-c', '--commas', action='store_true', help='Intersperse commas')
    yo_parser.add_argument('beverage', help='beverage to drink with the chant')

    @cmd2.with_argparser(yo_parser)
    def do_yo(self, args):
        """Compose a yo-ho-ho type chant with flexible options."""
        chant = ['yo'] + ['ho'] * args.ho
        separator = ', ' if args.commas else ' '
        chant = separator.join(chant)
        self.poutput('{0} and a bottle of {1}'.format(chant, args.beverage))
if __name__ == '__main__':
    import sys

    # Create an instance of the Pirate derived class and enter the REPL with cmdloop().
    pirate = Pirate()
    sys_exit_code = pirate.cmdloop()
    print('Exiting with code: {!r}'.format(sys_exit_code))
    sys.exit(sys_exit_code)
| mit | a644a03425651b187ff185fd49063116 | 31.284314 | 117 | 0.604009 | 3.469968 | false | false | false | false |
python-cmd2/cmd2 | tests_isolated/test_commandset/test_categories.py | 1 | 4417 | #!/usr/bin/env python3
# coding=utf-8
"""
Simple example demonstrating basic CommandSet usage.
"""
from typing import (
Any,
)
import cmd2
from cmd2 import (
CommandSet,
with_default_category,
)
@with_default_category('Default Category')
class MyBaseCommandSet(CommandSet):
    """Defines a default category for all sub-class CommandSets"""

    def __init__(self, _: Any):
        # The positional argument is ignored; it only lets tests construct instances with a dummy value
        super(MyBaseCommandSet, self).__init__()
class ChildInheritsParentCategories(MyBaseCommandSet):
    """
    This subclass doesn't declare any categories so all commands here are also categorized under 'Default Category'
    """

    def do_hello(self, _: cmd2.Statement):
        self._cmd.poutput('Hello')

    def do_world(self, _: cmd2.Statement):
        self._cmd.poutput('World')
@with_default_category('Non-Heritable Category', heritable=False)
class ChildOverridesParentCategoriesNonHeritable(MyBaseCommandSet):
    """
    This subclass overrides the 'Default Category' from the parent, but in a non-heritable fashion. Sub-classes of this
    CommandSet will not inherit this category and will, instead, inherit 'Default Category'
    """

    def do_goodbye(self, _: cmd2.Statement):
        self._cmd.poutput('Goodbye')
class GrandchildInheritsGrandparentCategory(ChildOverridesParentCategoriesNonHeritable):
    """
    This subclass's parent class declared its default category non-heritable. Instead, it inherits the category defined
    by the grandparent class.
    """

    def do_aloha(self, _: cmd2.Statement):
        self._cmd.poutput('Aloha')
@with_default_category('Heritable Category')
class ChildOverridesParentCategories(MyBaseCommandSet):
    """
    This subclass is decorated with a default category that is heritable. This overrides the parent class's default
    category declaration.
    """

    def do_bonjour(self, _: cmd2.Statement):
        self._cmd.poutput('Bonjour')
class GrandchildInheritsHeritable(ChildOverridesParentCategories):
    """
    This subclass's parent declares a default category that overrides its parent. As a result, commands in this
    CommandSet will be categorized under 'Heritable Category'
    """

    def do_monde(self, _: cmd2.Statement):
        self._cmd.poutput('Monde')
class ExampleApp(cmd2.Cmd):
    """
    Example to demonstrate heritable default categories
    """

    def __init__(self):
        # CommandSets are registered manually by the test, so disable auto-loading
        super(ExampleApp, self).__init__(auto_load_commands=False)

    def do_something(self, arg):
        self.poutput('this is the something command')
def test_heritable_categories():
    """Verify that @with_default_category categories are inherited (or not) per the heritable flag."""
    app = ExampleApp()

    base_cs = MyBaseCommandSet(0)
    assert getattr(base_cs, cmd2.constants.CLASS_ATTR_DEFAULT_HELP_CATEGORY, None) == 'Default Category'

    # Subclass with no decorator inherits the parent's category
    child1 = ChildInheritsParentCategories(1)
    assert getattr(child1, cmd2.constants.CLASS_ATTR_DEFAULT_HELP_CATEGORY, None) == 'Default Category'
    app.register_command_set(child1)
    assert getattr(app.cmd_func('hello').__func__, cmd2.constants.CMD_ATTR_HELP_CATEGORY, None) == 'Default Category'
    app.unregister_command_set(child1)

    # Non-heritable category applies to this class's own commands only
    child_nonheritable = ChildOverridesParentCategoriesNonHeritable(2)
    assert getattr(child_nonheritable, cmd2.constants.CLASS_ATTR_DEFAULT_HELP_CATEGORY, None) != 'Non-Heritable Category'
    app.register_command_set(child_nonheritable)
    assert getattr(app.cmd_func('goodbye').__func__, cmd2.constants.CMD_ATTR_HELP_CATEGORY, None) == 'Non-Heritable Category'
    app.unregister_command_set(child_nonheritable)

    # Grandchild skips the non-heritable category and inherits the grandparent's
    grandchild1 = GrandchildInheritsGrandparentCategory(3)
    assert getattr(grandchild1, cmd2.constants.CLASS_ATTR_DEFAULT_HELP_CATEGORY, None) == 'Default Category'
    app.register_command_set(grandchild1)
    assert getattr(app.cmd_func('aloha').__func__, cmd2.constants.CMD_ATTR_HELP_CATEGORY, None) == 'Default Category'
    app.unregister_command_set(grandchild1)

    # Heritable category overrides the parent's and flows to commands
    child_overrides = ChildOverridesParentCategories(4)
    assert getattr(child_overrides, cmd2.constants.CLASS_ATTR_DEFAULT_HELP_CATEGORY, None) == 'Heritable Category'
    app.register_command_set(child_overrides)
    assert getattr(app.cmd_func('bonjour').__func__, cmd2.constants.CMD_ATTR_HELP_CATEGORY, None) == 'Heritable Category'
    app.unregister_command_set(child_overrides)

    # Heritable category is inherited by the grandchild as well
    grandchild2 = GrandchildInheritsHeritable(5)
    assert getattr(grandchild2, cmd2.constants.CLASS_ATTR_DEFAULT_HELP_CATEGORY, None) == 'Heritable Category'
| mit | 76c134ae4a148dd9654c0d3abb9d002e | 35.204918 | 125 | 0.725832 | 3.671654 | false | false | false | false |
python-cmd2/cmd2 | cmd2/clipboard.py | 1 | 1251 | # coding=utf-8
"""
This module provides basic ability to copy from and paste to the clipboard/pastebuffer.
"""
from typing import (
cast,
)
import pyperclip # type: ignore[import]
# noinspection PyProtectedMember

# Can we access the clipboard? Should always be true on Windows and Mac, but only sometimes on Linux
# The result is recorded in the module-level flag can_clip.
# noinspection PyBroadException
try:
    # Try getting the contents of the clipboard
    _ = pyperclip.paste()

# pyperclip raises at least the following types of exceptions. To be safe, just catch all Exceptions.
#   FileNotFoundError on Windows Subsystem for Linux (WSL) when Windows paths are removed from $PATH
#   ValueError for headless Linux systems without Gtk installed
#   AssertionError can be raised by paste_klipper().
#   PyperclipException for pyperclip-specific exceptions
except Exception:
    can_clip = False
else:
    can_clip = True
def get_paste_buffer() -> str:
    """Get the contents of the clipboard / paste buffer.

    :return: contents of the clipboard
    """
    return cast(str, pyperclip.paste())
def write_to_paste_buffer(txt: str) -> None:
    """Copy text to the clipboard / paste buffer.

    :param txt: text to copy to the clipboard
    """
    pyperclip.copy(txt)
| mit | ecaa92bc118c596bb75cc7ed9666f55a | 27.431818 | 101 | 0.719424 | 3.779456 | false | false | false | false |
python-cmd2/cmd2 | examples/default_categories.py | 1 | 2474 | #!/usr/bin/env python3
# coding=utf-8
"""
Simple example demonstrating basic CommandSet usage.
"""
import cmd2
from cmd2 import (
CommandSet,
with_default_category,
)
@with_default_category('Default Category')
class MyBaseCommandSet(CommandSet):
    """Defines a default category for all sub-class CommandSets"""

    pass
class ChildInheritsParentCategories(MyBaseCommandSet):
    """
    This subclass doesn't declare any categories so all commands here are also categorized under 'Default Category'
    """

    def do_hello(self, _: cmd2.Statement):
        self._cmd.poutput('Hello')

    def do_world(self, _: cmd2.Statement):
        self._cmd.poutput('World')
@with_default_category('Non-Heritable Category', heritable=False)
class ChildOverridesParentCategoriesNonHeritable(MyBaseCommandSet):
    """
    This subclass overrides the 'Default Category' from the parent, but in a non-heritable fashion. Sub-classes of this
    CommandSet will not inherit this category and will, instead, inherit 'Default Category'
    """

    def do_goodbye(self, _: cmd2.Statement):
        self._cmd.poutput('Goodbye')
class GrandchildInheritsGrandparentCategory(ChildOverridesParentCategoriesNonHeritable):
    """
    This subclass's parent class declared its default category non-heritable. Instead, it inherits the category defined
    by the grandparent class.
    """

    def do_aloha(self, _: cmd2.Statement):
        self._cmd.poutput('Aloha')
@with_default_category('Heritable Category')
class ChildOverridesParentCategories(MyBaseCommandSet):
    """
    This subclass is decorated with a default category that is heritable. This overrides the parent class's default
    category declaration.
    """

    def do_bonjour(self, _: cmd2.Statement):
        self._cmd.poutput('Bonjour')
class GrandchildInheritsHeritable(ChildOverridesParentCategories):
    """
    This subclass's parent declares a default category that overrides its parent. As a result, commands in this
    CommandSet will be categorized under 'Heritable Category'
    """

    def do_monde(self, _: cmd2.Statement):
        self._cmd.poutput('Monde')
class ExampleApp(cmd2.Cmd):
    """
    Example to demonstrate heritable default categories
    """

    def __init__(self):
        # Auto-loading is left enabled here; the CommandSets above register themselves
        super(ExampleApp, self).__init__()

    def do_something(self, arg):
        self.poutput('this is the something command')
if __name__ == '__main__':
    # Start the interactive command loop
    app = ExampleApp()
    app.cmdloop()
| mit | c0a5b7af7032cd0ed94dab1eb9875c10 | 26.797753 | 119 | 0.704123 | 3.902208 | false | false | false | false |
python-cmd2/cmd2 | plugins/template/tests/test_myplugin.py | 1 | 1894 | #
# coding=utf-8
import cmd2_myplugin
from cmd2 import (
cmd2,
)
######
#
# define a class which uses our plugin and some convenience functions
#
######
class MyApp(cmd2_myplugin.MyPluginMixin, cmd2.Cmd):
    """Simple subclass of cmd2.Cmd with our SayMixin plugin included."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    @cmd2_myplugin.empty_decorator
    def do_empty(self, args):
        # Demonstrates that the plugin's decorator wraps a command without changing it
        self.poutput("running the empty command")
#
# You can't use a fixture to instantiate your app if you want to use
# to use the capsys fixture to capture the output. cmd2.Cmd sets
# internal variables to sys.stdout and sys.stderr on initialization
# and then uses those internal variables instead of sys.stdout. It does
# this so you can redirect output from within the app. The capsys fixture
# can't capture the output properly in this scenario.
#
# If you have extensive initialization needs, create a function
# to initialize your cmd2 application.
def init_app():
    """Create and return a fully initialized MyApp instance (used instead of a fixture; see note above this function)."""
    app = MyApp()
    return app
#####
#
# unit tests
#
#####
def test_say(capsys):
    """The mixed-in 'say' command should echo its argument after the postparsing hook runs."""
    # call our initialization function instead of using a fixture
    app = init_app()
    # run our mixed in command
    app.onecmd_plus_hooks('say hello')
    # use the capsys fixture to retrieve the output on stdout and stderr
    out, err = capsys.readouterr()
    # make our assertions
    assert out == 'in postparsing hook\nhello\n'
    assert not err
def test_decorator(capsys):
    """The plugin's empty_decorator should emit its message before the command output."""
    # call our initialization function instead of using a fixture
    app = init_app()
    # run one command in the app
    app.onecmd_plus_hooks('empty')
    # use the capsys fixture to retrieve the output on stdout and stderr
    out, err = capsys.readouterr()
    # make our assertions
    assert out == 'in postparsing hook\nin the empty decorator\nrunning the empty command\n'
    assert not err
| mit | 49504b48f5a5ef9de5fd00c4d8669ef6 | 24.945205 | 92 | 0.693242 | 3.780439 | false | false | false | false |
python-cmd2/cmd2 | examples/hooks.py | 1 | 4507 | #!/usr/bin/env python
# coding=utf-8
"""
A sample application for cmd2 demonstrating how to use hooks.
This application shows how to use postparsing hooks to allow case insensitive
command names, abbreviated commands, as well as allowing numeric arguments to
follow a command without any intervening whitespace.
"""
import re
from typing import (
List,
)
import cmd2
class CmdLineApp(cmd2.Cmd):
    """Example cmd2 application demonstrating the use of hooks.

    This simple application has one command, `list` which generates a list
    of 10 numbers. This command takes one optional argument, which is the
    number to start on.

    We have three postparsing hooks, which allow the user to enter:

        (Cmd) list 5
        (Cmd) L 5
        (Cmd) l 5
        (Cmd) L5
        (Cmd) LI5

    and have them all treated as valid input which prints a list of 10 numbers
    starting with the number 5.

    We also add a postcommand hook, which updates the shell prompt to show the
    raw contents of the Statement after the postparsing hooks are finished. To
    use this hook, run `(Cmd) set debug True`. All of the above variations of
    the list command should produce the same raw content.
    """

    # Setting this true makes it run a shell command if a cmd2/cmd command doesn't exist
    # default_to_shell = True

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # register four hooks; postparsing hooks run in registration order
        self.register_postparsing_hook(self.add_whitespace_hook)
        self.register_postparsing_hook(self.downcase_hook)
        self.register_postparsing_hook(self.abbrev_hook)
        self.register_postcmd_hook(self.proof_hook)

    def add_whitespace_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
        """A hook to split alphabetic command names immediately followed by a number.

        l24 -> l 24
        list24 -> list 24
        list 24 -> list 24
        """
        command = data.statement.command
        # regular expression with looks for:
        #  ^ - the beginning of the string
        #  ([^\s\d]+) - one or more non-whitespace non-digit characters, set as capture group 1
        #  (\d+) - one or more digit characters, set as capture group 2
        command_pattern = re.compile(r'^([^\s\d]+)(\d+)')
        match = command_pattern.search(command)
        if match:
            command = match.group(1)
            first_arg = match.group(2)
            rest_args = data.statement.args
            post_command = data.statement.post_command
            # Rebuild the command line with a space inserted and re-parse it
            data.statement = self.statement_parser.parse(f'{command} {first_arg} {rest_args} {post_command}')
        return data

    def downcase_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
        """A hook to make uppercase commands lowercase."""
        command = data.statement.command.lower()
        args = data.statement.args
        post_command = data.statement.post_command
        data.statement = self.statement_parser.parse(f'{command} {args} {post_command}')
        return data

    def abbrev_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
        """Accept unique abbreviated commands"""
        func = self.cmd_func(data.statement.command)
        if func is None:
            # check if the entered command might be an abbreviation
            possible_cmds = [cmd for cmd in self.get_all_commands() if cmd.startswith(data.statement.command)]
            # Only expand when the abbreviation is unambiguous
            if len(possible_cmds) == 1:
                raw = data.statement.raw.replace(data.statement.command, possible_cmds[0], 1)
                data.statement = self.statement_parser.parse(raw)
        return data

    def proof_hook(self, data: cmd2.plugin.PostcommandData) -> cmd2.plugin.PostcommandData:
        """Update the shell prompt with the new raw statement after postparsing hooks are finished"""
        if self.debug:
            self.prompt = f'({data.statement.raw})'
        return data

    @cmd2.with_argument_list
    def do_list(self, arglist: List[str]) -> None:
        """Generate a list of 10 numbers."""
        if arglist:
            first = arglist[0]
            try:
                first = int(first)
            except ValueError:
                # Non-numeric argument: fall back to starting at 1
                first = 1
        else:
            first = 1
        last = first + 10

        for x in range(first, last):
            self.poutput(str(x))
if __name__ == '__main__':
    import sys

    # Enter the REPL and propagate its exit code to the shell
    c = CmdLineApp()
    sys.exit(c.cmdloop())
| mit | b33e2e34014ce01f10a4fcff598ac256 | 35.056 | 110 | 0.636787 | 4.031306 | false | false | false | false |
python-cmd2/cmd2 | examples/migrating.py | 1 | 1221 | #!/usr/bin/env python
# coding=utf-8
"""
A sample application for cmd which can be used to show how to migrate to cmd2.
"""
import cmd
import random
class CmdLineApp(cmd.Cmd):
    """Example cmd application."""

    # Filler words used by the mumble command
    MUMBLES = ['like', '...', 'um', 'er', 'hmmm', 'ahh']
    MUMBLE_FIRST = ['so', 'like', 'well']
    MUMBLE_LAST = ['right?']

    def do_exit(self, line):
        """Exit the application"""
        return True

    # EOF and quit behave exactly like exit
    do_EOF = do_exit
    do_quit = do_exit

    def do_speak(self, line):
        """Repeats what you tell me to."""
        print(line, file=self.stdout)

    do_say = do_speak

    def do_mumble(self, line):
        """Mumbles what you tell me to."""
        mumbled = []
        # Sometimes lead in with a filler word
        if random.random() < 0.33:
            mumbled.append(random.choice(self.MUMBLE_FIRST))
        for word in line.split(' '):
            # Sometimes insert a mumble before the actual word
            if random.random() < 0.40:
                mumbled.append(random.choice(self.MUMBLES))
            mumbled.append(word)
        # Sometimes trail off with a closing filler
        if random.random() < 0.25:
            mumbled.append(random.choice(self.MUMBLE_LAST))
        print(' '.join(mumbled), file=self.stdout)
if __name__ == '__main__':
    import sys

    # Enter the REPL; cmdloop() blocks until the user exits
    c = CmdLineApp()
    sys.exit(c.cmdloop())
| mit | 921122c8f0ca5429b5ae5227635fda9b | 23.918367 | 78 | 0.556102 | 3.317935 | false | false | false | false |
python-cmd2/cmd2 | cmd2/history.py | 1 | 14524 | # coding=utf-8
"""
History management classes
"""
import json
import re
from collections import (
OrderedDict,
)
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Union,
overload,
)
import attr
from . import (
utils,
)
from .parsing import (
Statement,
)
@attr.s(auto_attribs=True, frozen=True)
class HistoryItem:
    """Class used to represent one command in the history list"""

    # Format strings for numbered listing output; the 'x' variant marks expanded lines
    _listformat = ' {:>4} {}'
    _ex_listformat = ' {:>4}x {}'

    # Used in JSON dictionaries
    _statement_field = 'statement'

    # NOTE(review): default=None would fail the instance_of(Statement) validator if relied upon;
    # callers appear to always supply a Statement — confirm before depending on the default.
    statement: Statement = attr.ib(default=None, validator=attr.validators.instance_of(Statement))

    def __str__(self) -> str:
        """A convenient human readable representation of the history item"""
        return self.statement.raw

    @property
    def raw(self) -> str:
        """The raw input from the user for this item.

        Proxy property for ``self.statement.raw``
        """
        return self.statement.raw

    @property
    def expanded(self) -> str:
        """Return the command as run which includes shortcuts and aliases resolved
        plus any changes made in hooks

        Proxy property for ``self.statement.expanded_command_line``
        """
        return self.statement.expanded_command_line

    def pr(self, idx: int, script: bool = False, expanded: bool = False, verbose: bool = False) -> str:
        """Represent this item in a pretty fashion suitable for printing.

        If you pass verbose=True, script and expanded will be ignored

        :param idx: The 1-based index of this item in the history list
        :param script: True if formatting for a script (No item numbers)
        :param expanded: True if expanded command line should be printed
        :param verbose: True if expanded and raw should both appear when they are different
        :return: pretty print string version of a HistoryItem
        """
        if verbose:
            raw = self.raw.rstrip()
            expanded_command = self.expanded

            ret_str = self._listformat.format(idx, raw)
            # Only show the expanded line when it differs from the raw input
            if raw != expanded_command:
                ret_str += '\n' + self._ex_listformat.format(idx, expanded_command)
        else:
            if expanded:
                ret_str = self.expanded
            else:
                ret_str = self.raw.rstrip()

                # In non-verbose mode, display raw multiline commands on 1 line
                if self.statement.multiline_command:
                    # This is an approximation and not meant to be a perfect piecing together of lines.
                    # All newlines will be converted to spaces, including the ones in quoted strings that
                    # are considered literals. Also if the final line starts with a terminator, then the
                    # terminator will have an extra space before it in the 1 line version.
                    ret_str = ret_str.replace('\n', ' ')

            # Display a numbered list if not writing to a script
            if not script:
                ret_str = self._listformat.format(idx, ret_str)

        return ret_str

    def to_dict(self) -> Dict[str, Any]:
        """Utility method to convert this HistoryItem into a dictionary for use in persistent JSON history files"""
        return {HistoryItem._statement_field: self.statement.to_dict()}

    @staticmethod
    def from_dict(source_dict: Dict[str, Any]) -> 'HistoryItem':
        """
        Utility method to restore a HistoryItem from a dictionary

        :param source_dict: source data dictionary (generated using to_dict())
        :return: HistoryItem object
        :raises KeyError: if source_dict is missing required elements
        """
        statement_dict = source_dict[HistoryItem._statement_field]
        return HistoryItem(Statement.from_dict(statement_dict))
class History(List[HistoryItem]):
    """A list of :class:`~cmd2.history.HistoryItem` objects with additional methods
    for searching and managing the list.

    :class:`~cmd2.Cmd` instantiates this class into the :data:`~cmd2.Cmd.history`
    attribute, and adds commands to it as a user enters them.

    See :ref:`features/history:History` for information about the built-in command
    which allows users to view, search, run, and save previously entered commands.

    Developers interested in accessing previously entered commands can use this
    class to gain access to the historical record.
    """

    # Used in JSON dictionaries (schema versioning for persisted history files)
    _history_version = '1.0.0'
    _history_version_field = 'history_version'
    _history_items_field = 'history_items'
    def __init__(self, seq: Iterable[HistoryItem] = ()) -> None:
        super(History, self).__init__(seq)
        # Index of the first item entered during the current session;
        # items before this index came from persisted history.
        self.session_start_index = 0
    def start_session(self) -> None:
        """Start a new session, thereby setting the next index as the first index in the new session."""
        self.session_start_index = len(self)
# noinspection PyMethodMayBeStatic
def _zero_based_index(self, onebased: Union[int, str]) -> int:
"""Convert a one-based index to a zero-based index."""
result = int(onebased)
if result > 0:
result -= 1
return result
    @overload
    def append(self, new: HistoryItem) -> None:
        ...  # pragma: no cover

    @overload
    def append(self, new: Statement) -> None:
        ...  # pragma: no cover

    def append(self, new: Union[Statement, HistoryItem]) -> None:
        """Append a new statement to the end of the History list.

        :param new: Statement object which will be composed into a HistoryItem
                    and added to the end of the list
        """
        # Wrap raw Statements in a HistoryItem; HistoryItems are stored as-is
        history_item = HistoryItem(new) if isinstance(new, Statement) else new
        super(History, self).append(history_item)
    def clear(self) -> None:
        """Remove all items from the History list."""
        super().clear()
        # Reset the session marker since the list is now empty
        self.start_session()
def get(self, index: int) -> HistoryItem:
"""Get item from the History list using 1-based indexing.
:param index: optional item to get
:return: a single :class:`~cmd2.history.HistoryItem`
"""
if index == 0:
raise IndexError('The first command in history is command 1.')
elif index < 0:
return self[index]
else:
return self[index - 1]
# This regular expression parses input for the span() method. There are five parts:
#
# ^\s* matches any whitespace at the beginning of the
# input. This is here so you don't have to trim the input
#
# (?P<start>-?[1-9]{1}\d*)? create a capture group named 'start' which matches an
# optional minus sign, followed by exactly one non-zero
# digit, and as many other digits as you want. This group
# is optional so that we can match an input string like '..2'.
# This regex will match 1, -1, 10, -10, but not 0 or -0.
#
# (?P<separator>:|(\.{2,}))? create a capture group named 'separator' which matches either
# a colon or two periods.
#
# (?P<end>-?[1-9]{1}\d*)? create a capture group named 'end' which matches an
# optional minus sign, followed by exactly one non-zero
# digit, and as many other digits as you want. This group is
# optional so that we can match an input string like ':'
# or '5:'. This regex will match 1, -1, 10, -10, but not
# 0 or -0.
#
# \s*$ match any whitespace at the end of the input. This is here so
# you don't have to trim the input
#
spanpattern = re.compile(r'^\s*(?P<start>-?[1-9]\d*)?(?P<separator>:|(\.{2,}))(?P<end>-?[1-9]\d*)?\s*$')
def span(self, span: str, include_persisted: bool = False) -> 'OrderedDict[int, HistoryItem]':
    """Return a slice of the History list

    :param span: string containing an index or a slice
    :param include_persisted: if True, then retrieve full results including from persisted history
    :return: a dictionary of history items keyed by their 1-based index in ascending order,
             or an empty dictionary if no results were found

    This method can accommodate input in any of these forms:

        a..b or a:b
        a.. or a:
        ..a or :a
        -a.. or -a:
        ..-a or :-a

    Different from native python indexing and slicing of arrays, this method
    uses 1-based array numbering. Users who are not programmers can't grok
    zero based numbering. Programmers can sometimes grok zero based numbering.
    Which reminds me, there are only two hard problems in programming:

    - naming
    - cache invalidation
    - off by one errors

    :raises ValueError: if the span string does not parse as an index or slice
    """
    results = self.spanpattern.search(span)
    if not results:
        # our regex doesn't match the input, bail out
        raise ValueError('History indices must be positive or negative integers, and may not be zero.')

    start_token = results.group('start')
    if start_token:
        # convert the 1-based token to a 0-based index, clamped to the last item
        start = min(self._zero_based_index(start_token), len(self) - 1)
        if start < 0:
            # negative index counts back from the end; clamp at the first item
            start = max(0, len(self) + start)
    else:
        # no start given: begin at the session start (or at 0 when persisted history is wanted)
        start = 0 if include_persisted else self.session_start_index

    end_token = results.group('end')
    if end_token:
        # 'end' is 1-based and inclusive, which equals an exclusive 0-based bound
        end = min(int(end_token), len(self))
        if end < 0:
            # a negative end is inclusive too, hence the +1 when counting from the back
            end = max(0, len(self) + end + 1)
    else:
        end = len(self)

    return self._build_result_dictionary(start, end)
def str_search(self, search: str, include_persisted: bool = False) -> 'OrderedDict[int, HistoryItem]':
    """Find history items which contain a given string

    :param search: the string to search for
    :param include_persisted: if True, then search full history including persisted history
    :return: a dictionary of history items keyed by their 1-based index in ascending order,
             or an empty dictionary if the string was not found
    """
    # Normalize the needle once; previously this was recomputed inside the
    # filter function for every single history item.
    sloppy = utils.norm_fold(search)

    def isin(history_item: HistoryItem) -> bool:
        """filter function for string search of history"""
        inraw = sloppy in utils.norm_fold(history_item.raw)
        inexpanded = sloppy in utils.norm_fold(history_item.expanded)
        return inraw or inexpanded

    start = 0 if include_persisted else self.session_start_index
    return self._build_result_dictionary(start, len(self), isin)
def regex_search(self, regex: str, include_persisted: bool = False) -> 'OrderedDict[int, HistoryItem]':
    """Find history items which match a given regular expression

    :param regex: the regular expression to search for.
    :param include_persisted: if True, then search full history including persisted history
    :return: a dictionary of history items keyed by their 1-based index in ascending order,
             or an empty dictionary if the regex was not matched
    """
    pattern = regex.strip()
    # allow the expression to be wrapped in forward slashes, e.g. /foo/
    if pattern.startswith(r'/') and pattern.endswith(r'/'):
        pattern = pattern[1:-1]
    finder = re.compile(pattern, re.DOTALL | re.MULTILINE)

    def matches(item: HistoryItem) -> bool:
        """filter function for doing a regular expression search of history"""
        return bool(finder.search(item.raw) or finder.search(item.expanded))

    first = 0 if include_persisted else self.session_start_index
    return self._build_result_dictionary(first, len(self), matches)
def truncate(self, max_length: int) -> None:
    """Trim the history to at most ``max_length`` items, discarding the oldest first.

    :param max_length: maximum number of items to keep; zero or a negative
                       value clears the history entirely
    :return: nothing
    """
    if max_length <= 0:
        # remove all history
        del self[:]
        return
    excess = len(self) - max_length
    if excess > 0:
        del self[:excess]
def _build_result_dictionary(
self, start: int, end: int, filter_func: Optional[Callable[[HistoryItem], bool]] = None
) -> 'OrderedDict[int, HistoryItem]':
"""
Build history search results
:param start: start index to search from
:param end: end index to stop searching (exclusive)
"""
results: OrderedDict[int, HistoryItem] = OrderedDict()
for index in range(start, end):
if filter_func is None or filter_func(self[index]):
results[index + 1] = self[index]
return results
def to_json(self) -> str:
    """Utility method to convert this History into a JSON string for use in persistent history files"""
    payload = {
        History._history_version_field: History._history_version,
        History._history_items_field: [item.to_dict() for item in self],
    }
    return json.dumps(payload, ensure_ascii=False, indent=2)
@staticmethod
def from_json(history_json: str) -> 'History':
    """
    Utility method to restore History from a JSON string

    :param history_json: history data as JSON string (generated using to_json())
    :return: History object
    :raises json.JSONDecodeError: if passed invalid JSON string
    :raises KeyError: if JSON is missing required elements
    :raises ValueError: if history version in JSON isn't supported
    """
    parsed = json.loads(history_json)

    file_version = parsed[History._history_version_field]
    if file_version != History._history_version:
        raise ValueError(
            f"Unsupported history file version: {file_version}. "
            f"This application uses version {History._history_version}."
        )

    restored = History()
    for item_dict in parsed[History._history_items_field]:
        restored.append(HistoryItem.from_dict(item_dict))
    return restored
| mit | d44196885b3a1a598e3ef8c34271442c | 39.011019 | 121 | 0.600661 | 4.374699 | false | false | false | false |
python-cmd2/cmd2 | tests/conftest.py | 1 | 7300 | # coding=utf-8
"""
Cmd2 unit/functional testing
"""
import sys
from contextlib import (
redirect_stderr,
redirect_stdout,
)
from typing import (
List,
Optional,
Union,
)
from unittest import (
mock,
)
from pytest import (
fixture,
)
import cmd2
from cmd2.rl_utils import (
readline,
)
from cmd2.utils import (
StdSim,
)
def verify_help_text(
    cmd2_app: cmd2.Cmd, help_output: Union[str, List[str]], verbose_strings: Optional[List[str]] = None
) -> None:
    """Assert that every visible command (and optional extra string) appears in the help text.

    :param cmd2_app: instance of cmd2.Cmd
    :param help_output: output of help, either as a string or list of strings
    :param verbose_strings: optional list of verbose strings to search for
    """
    help_text = help_output if isinstance(help_output, str) else ''.join(help_output)

    for command_name in cmd2_app.get_visible_commands():
        assert command_name in help_text

    for expected in verbose_strings or []:
        assert expected in help_text
# Help text for the history command
HELP_HISTORY = """Usage: history [-h] [-r | -e | -o FILE | -t TRANSCRIPT_FILE | -c] [-s] [-x]
[-v] [-a]
[arg]
View, run, edit, save, or clear previously entered commands
positional arguments:
arg empty all history items
a one history item by number
a..b, a:b, a:, ..b items by indices (inclusive)
string items containing string
/regex/ items matching regular expression
optional arguments:
-h, --help show this help message and exit
-r, --run run selected history items
-e, --edit edit and then run selected history items
-o, --output_file FILE
output commands to a script file, implies -s
-t, --transcript TRANSCRIPT_FILE
output commands and results to a transcript file,
implies -s
-c, --clear clear all history
formatting:
-s, --script output commands in script format, i.e. without command
numbers
-x, --expanded output fully parsed commands with any aliases and
macros expanded, instead of typed commands
-v, --verbose display history and include expanded commands if they
differ from the typed command
-a, --all display all commands, including ones persisted from
previous sessions
"""
# Expected output from the shortcuts command with default built-in shortcuts
SHORTCUTS_TXT = """Shortcuts for other commands:
!: shell
?: help
@: run_script
@@: _relative_run_script
"""
# Output from the set command
SET_TXT = (
"Name Value Description \n"
"==================================================================================================================\n"
"allow_style Terminal Allow ANSI text style sequences in output (valid values: \n"
" Always, Never, Terminal) \n"
"always_show_hint False Display tab completion hint even when completion suggestions\n"
" print \n"
"debug False Show full traceback on exception \n"
"echo False Echo command issued into output \n"
"editor vim Program used by 'edit' \n"
"feedback_to_output False Include nonessentials in '|', '>' results \n"
"max_completion_items 50 Maximum number of CompletionItems to display during tab \n"
" completion \n"
"quiet False Don't print nonessential feedback \n"
"timing False Report execution times \n"
)
def normalize(block):
    """Prepare a block of text for comparison.

    Leading/trailing newlines are stripped, then the text is split into
    individual lines with trailing whitespace removed from each.
    """
    assert isinstance(block, str)
    trimmed = block.strip('\n')
    return [raw_line.rstrip() for raw_line in trimmed.splitlines()]
def run_cmd(app, cmd):
    """Clear out and err StdSim buffers, run the command, and return out and err

    :param app: the cmd2 application under test
    :param cmd: the command line string to execute
    :return: tuple of (normalized stdout lines, normalized stderr lines)
    """
    # Swap in the app's stream so anything written to sys.stdout is captured too
    saved_sysout = sys.stdout
    sys.stdout = app.stdout

    # This will be used to capture app.stdout and sys.stdout
    copy_cmd_stdout = StdSim(app.stdout)

    # This will be used to capture sys.stderr
    copy_stderr = StdSim(sys.stderr)

    try:
        app.stdout = copy_cmd_stdout
        with redirect_stdout(copy_cmd_stdout):
            with redirect_stderr(copy_stderr):
                app.onecmd_plus_hooks(cmd)
    finally:
        # Restore the real streams even if the command raised
        app.stdout = copy_cmd_stdout.inner_stream
        sys.stdout = saved_sysout

    out = copy_cmd_stdout.getvalue()
    err = copy_stderr.getvalue()
    return normalize(out), normalize(err)
@fixture
def base_app():
    """pytest fixture providing a default cmd2 application with py/ipy commands enabled"""
    return cmd2.Cmd(include_py=True, include_ipy=True)
# These are odd file names for testing quoting of them during tab completion
odd_file_names = ['nothingweird', 'has spaces', '"is_double_quoted"', "'is_single_quoted'"]
def complete_tester(text: str, line: str, begidx: int, endidx: int, app) -> Optional[str]:
    """
    Convenience wrapper for exercising cmd2.complete() in unit tests.

    There is no real console for readline to monitor in a test environment,
    so the readline accessor functions are replaced with mocks that report
    the supplied buffer state before complete() is invoked.

    :param text: the string prefix we are attempting to match
    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :param app: the cmd2 app that will run completions
    :return: the first matched string, or None when there are no matches

    Matches are stored in app.completion_matches and have already been
    sorted by complete()
    """
    # Run the readline tab completion function with readline mocks in place
    with mock.patch.object(readline, 'get_line_buffer', lambda: line), mock.patch.object(
        readline, 'get_begidx', lambda: begidx
    ), mock.patch.object(readline, 'get_endidx', lambda: endidx):
        return app.complete(text, 0)
| mit | 0a6317a63e32a32758306bb67eb1d3c0 | 37.020833 | 122 | 0.548082 | 4.617331 | false | false | false | false |
python-cmd2/cmd2 | examples/modular_commands/commandset_complex.py | 1 | 1795 | # coding=utf-8
# flake8: noqa E302
"""
Test CommandSet
"""
import argparse
from typing import (
List,
)
import cmd2
@cmd2.with_default_category('Fruits')
class CommandSetA(cmd2.CommandSet):
    # Example CommandSet: commands without an explicit category are grouped
    # under 'Fruits' by the class decorator.

    def do_apple(self, statement: cmd2.Statement):
        # No docstring on purpose: cmd2 would surface it as the command's help text
        self._cmd.poutput('Apple!')

    def do_banana(self, statement: cmd2.Statement):
        """Banana Command"""
        self._cmd.poutput('Banana!!')

    # Parser for the cranberry command; the single positional must be one of
    # the listed choices and any extra tokens arrive via the `unknown` list.
    cranberry_parser = cmd2.Cmd2ArgumentParser()
    cranberry_parser.add_argument('arg1', choices=['lemonade', 'juice', 'sauce'])

    @cmd2.with_argparser(cranberry_parser, with_unknown_args=True)
    def do_cranberry(self, ns: argparse.Namespace, unknown: List[str]):
        self._cmd.poutput('Cranberry {}!!'.format(ns.arg1))
        if unknown and len(unknown):
            self._cmd.poutput('Unknown: ' + ', '.join(['{}'] * len(unknown)).format(*unknown))
        # expose results so callers/tests can inspect what was parsed
        self._cmd.last_result = {'arg1': ns.arg1, 'unknown': unknown}

    def help_cranberry(self):
        # Custom help handler; takes precedence over docstring-derived help
        self._cmd.stdout.write('This command does diddly squat...\n')

    @cmd2.with_argument_list
    @cmd2.with_category('Also Alone')
    def do_durian(self, args: List[str]):
        """Durian Command"""
        self._cmd.poutput('{} Arguments: '.format(len(args)))
        self._cmd.poutput(', '.join(['{}'] * len(args)).format(*args))

    def complete_durian(self, text: str, line: str, begidx: int, endidx: int) -> List[str]:
        # Tab-completion candidates for the durian command
        return self._cmd.basic_complete(text, line, begidx, endidx, ['stinks', 'smells', 'disgusting'])

    elderberry_parser = cmd2.Cmd2ArgumentParser()
    elderberry_parser.add_argument('arg1')

    @cmd2.with_category('Alone')
    @cmd2.with_argparser(elderberry_parser)
    def do_elderberry(self, ns: argparse.Namespace):
        self._cmd.poutput('Elderberry {}!!'.format(ns.arg1))
| mit | 84a991b05ef714bab2ae84a5fcfc9354 | 32.867925 | 103 | 0.641226 | 3.06314 | false | false | false | false |
python-cmd2/cmd2 | examples/table_creation.py | 1 | 11090 | #!/usr/bin/env python
# coding=utf-8
"""Examples of using the cmd2 table creation API"""
import functools
import sys
from typing import (
Any,
List,
)
from cmd2 import (
EightBitBg,
EightBitFg,
Fg,
ansi,
)
from cmd2.table_creator import (
AlternatingTable,
BorderedTable,
Column,
HorizontalAlignment,
SimpleTable,
)
# Text styles used in the tables; partials make each style a one-argument callable
bold_yellow = functools.partial(ansi.style, fg=Fg.LIGHT_YELLOW, bold=True)
blue = functools.partial(ansi.style, fg=Fg.LIGHT_BLUE)
green = functools.partial(ansi.style, fg=Fg.GREEN)
class DollarFormatter:
    """Example class to show that any object type can be passed as data to TableCreator and converted to a string"""

    def __init__(self, val: float) -> None:
        self.val = val

    def __str__(self) -> str:
        """Render the value as US-dollar currency (e.g. $100.22)"""
        return f"${self.val:,.2f}"
class Relative:
    """Class used for example data"""

    def __init__(self, name: str, relationship: str) -> None:
        self.name, self.relationship = name, relationship
class Book:
    """Class used for example data"""

    def __init__(self, title: str, year_published: str) -> None:
        self.title, self.year_published = title, year_published
class Author:
    """Class used for example data"""

    def __init__(self, name: str, birthday: str, place_of_birth: str) -> None:
        self.name, self.birthday, self.place_of_birth = name, birthday, place_of_birth
        # start with no books or relatives; example code fills these in later
        self.books: List[Book] = []
        self.relatives: List[Relative] = []
def ansi_print(text):
    """Wraps style_aware_write so style can be stripped if needed

    :param text: styled text to print; two newlines are appended
    """
    ansi.style_aware_write(sys.stdout, text + '\n\n')
def basic_tables():
    """Demonstrates basic examples of the table classes"""
    # Table data which demonstrates handling of wrapping and text styles
    data_list: List[List[Any]] = list()
    data_list.append(["Billy Smith", "123 Sesame St.\n" "Fake Town, USA 33445", DollarFormatter(100333.03)])
    data_list.append(
        [
            "William Longfellow Marmaduke III",
            "984 Really Long Street Name Which Will Wrap Nicely\n" "Apt 22G\n" "Pensacola, FL 32501",
            DollarFormatter(55135.22),
        ]
    )
    data_list.append(
        [
            "James " + blue("Bluestone"),
            bold_yellow("This address has line feeds,\n" "text styles, and wrapping. ")
            + blue("Style is preserved across lines."),
            DollarFormatter(300876.10),
        ]
    )
    data_list.append(["John Jones", "9235 Highway 32\n" + green("Greenville") + ", SC 29604", DollarFormatter(82987.71)])

    # Table Columns (width does not account for any borders or padding which may be added)
    columns: List[Column] = list()
    columns.append(Column("Name", width=20))
    columns.append(Column("Address", width=38))
    columns.append(
        Column("Income", width=14, header_horiz_align=HorizontalAlignment.RIGHT, data_horiz_align=HorizontalAlignment.RIGHT)
    )

    # Render the same data with each of the three table styles
    st = SimpleTable(columns)
    table = st.generate_table(data_list)
    ansi_print(table)

    bt = BorderedTable(columns)
    table = bt.generate_table(data_list)
    ansi_print(table)

    at = AlternatingTable(columns)
    table = at.generate_table(data_list)
    ansi_print(table)
def nested_tables():
    """
    Demonstrates how to nest tables with styles which conflict with the parent table by setting style_data_text to False.
    It also demonstrates coloring various aspects of tables.
    """
    # Create data for this example
    author_data: List[Author] = []
    author_1 = Author("Frank Herbert", "10/08/1920", "Tacoma, Washington")
    author_1.books.append(Book("Dune", "1965"))
    author_1.books.append(Book("Dune Messiah", "1969"))
    author_1.books.append(Book("Children of Dune", "1976"))
    author_1.books.append(Book("God Emperor of Dune", "1981"))
    author_1.books.append(Book("Heretics of Dune", "1984"))
    author_1.books.append(Book("Chapterhouse: Dune", "1985"))
    author_1.relatives.append(Relative("Flora Lillian Parkinson", "First Wife"))
    author_1.relatives.append(Relative("Beverly Ann Stuart", "Second Wife"))
    author_1.relatives.append(Relative("Theresa Diane Shackelford", "Third Wife"))
    author_1.relatives.append(Relative("Penelope Herbert", "Daughter"))
    author_1.relatives.append(Relative("Brian Patrick Herbert", "Son"))
    author_1.relatives.append(Relative("Bruce Calvin Herbert", "Son"))

    author_2 = Author("Jane Austen", "12/16/1775", "Steventon, Hampshire, England")
    author_2.books.append(Book("Sense and Sensibility", "1811"))
    author_2.books.append(Book("Pride and Prejudice", "1813"))
    author_2.books.append(Book("Mansfield Park ", "1814"))
    author_2.books.append(Book("Emma", "1815"))
    author_2.books.append(Book("Northanger Abbey", "1818"))
    author_2.books.append(Book("Persuasion", "1818"))
    author_2.books.append(Book("Lady Susan", "1871"))
    author_2.relatives.append(Relative("James Austen", "Brother"))
    author_2.relatives.append(Relative("George Austen", "Brother"))
    author_2.relatives.append(Relative("Edward Austen", "Brother"))
    author_2.relatives.append(Relative("Henry Thomas Austen", "Brother"))
    author_2.relatives.append(Relative("Cassandra Elizabeth Austen", "Sister"))
    author_2.relatives.append(Relative("Francis William Austen", "Brother"))
    author_2.relatives.append(Relative("Charles John Austen", "Brother"))

    author_data.append(author_1)
    author_data.append(author_2)

    # Define table which presents Author data fields vertically with no header.
    # This will be nested in the parent table's first column.
    author_columns: List[Column] = list()
    author_columns.append(Column("", width=14))
    author_columns.append(Column("", width=20))

    # The text labels in this table will be bold text. They will also be aligned by the table code.
    # When styled text is aligned, a TextStyle.RESET_ALL sequence is inserted between the aligned text
    # and the fill characters. Therefore, the Author table will contain TextStyle.RESET_ALL sequences,
    # which would interfere with the background color applied by the parent table. To account for this,
    # we will manually color the Author tables to match the background colors of the parent AlternatingTable's
    # rows and set style_data_text to False in the Author column.
    odd_author_tbl = SimpleTable(author_columns, data_bg=EightBitBg.GRAY_0)
    even_author_tbl = SimpleTable(author_columns, data_bg=EightBitBg.GRAY_15)

    # Define AlternatingTable for books checked out by people in the first table.
    # This will be nested in the parent table's second column.
    books_columns: List[Column] = list()
    books_columns.append(Column(ansi.style("Title", bold=True), width=25))
    books_columns.append(
        Column(
            ansi.style("Published", bold=True),
            width=9,
            header_horiz_align=HorizontalAlignment.RIGHT,
            data_horiz_align=HorizontalAlignment.RIGHT,
        )
    )

    books_tbl = AlternatingTable(
        books_columns,
        column_borders=False,
        border_fg=EightBitFg.GRAY_15,
        header_bg=EightBitBg.GRAY_0,
        odd_bg=EightBitBg.GRAY_0,
        even_bg=EightBitBg.GRAY_15,
    )

    # Define BorderedTable for relatives of the author
    # This will be nested in the parent table's third column.
    relative_columns: List[Column] = list()
    relative_columns.append(Column(ansi.style("Name", bold=True), width=25))
    relative_columns.append(Column(ansi.style("Relationship", bold=True), width=12))

    # Since the header labels are bold, we have the same issue as the Author table. Therefore, we will manually
    # color Relatives tables to match the background colors of the parent AlternatingTable's rows and set style_data_text
    # to False in the Relatives column.
    odd_relatives_tbl = BorderedTable(
        relative_columns,
        border_fg=EightBitFg.GRAY_15,
        border_bg=EightBitBg.GRAY_0,
        header_bg=EightBitBg.GRAY_0,
        data_bg=EightBitBg.GRAY_0,
    )

    even_relatives_tbl = BorderedTable(
        relative_columns,
        border_fg=EightBitFg.GRAY_0,
        border_bg=EightBitBg.GRAY_15,
        header_bg=EightBitBg.GRAY_15,
        data_bg=EightBitBg.GRAY_15,
    )

    # Define parent AlternatingTable which contains Author and Book tables
    parent_tbl_columns: List[Column] = list()

    # All of the nested tables already have background colors. Set style_data_text
    # to False so the parent AlternatingTable does not apply background color to them.
    parent_tbl_columns.append(
        Column(ansi.style("Author", bold=True), width=odd_author_tbl.total_width(), style_data_text=False)
    )
    parent_tbl_columns.append(Column(ansi.style("Books", bold=True), width=books_tbl.total_width(), style_data_text=False))
    parent_tbl_columns.append(
        Column(ansi.style("Relatives", bold=True), width=odd_relatives_tbl.total_width(), style_data_text=False)
    )

    parent_tbl = AlternatingTable(
        parent_tbl_columns,
        column_borders=False,
        border_fg=EightBitFg.GRAY_93,
        header_bg=EightBitBg.GRAY_0,
        odd_bg=EightBitBg.GRAY_0,
        even_bg=EightBitBg.GRAY_15,
    )

    # Construct the tables
    parent_table_data: List[List[Any]] = []
    for row, author in enumerate(author_data, start=1):
        # First build the author table and color it based on row number
        author_tbl = even_author_tbl if row % 2 == 0 else odd_author_tbl

        # This table has three rows and two columns
        table_data = [
            [ansi.style("Name", bold=True), author.name],
            [ansi.style("Birthday", bold=True), author.birthday],
            [ansi.style("Place of Birth", bold=True), author.place_of_birth],
        ]

        # Build the author table string
        author_tbl_str = author_tbl.generate_table(table_data, include_header=False, row_spacing=0)

        # Now build this author's book table
        table_data = [[book.title, book.year_published] for book in author.books]
        book_tbl_str = books_tbl.generate_table(table_data)

        # Lastly build the relatives table and color it based on row number
        relatives_tbl = even_relatives_tbl if row % 2 == 0 else odd_relatives_tbl
        table_data = [[relative.name, relative.relationship] for relative in author.relatives]
        relatives_tbl_str = relatives_tbl.generate_table(table_data)

        # Add these tables to the parent table's data
        parent_table_data.append(['\n' + author_tbl_str, '\n' + book_tbl_str + '\n\n', '\n' + relatives_tbl_str + '\n\n'])

    # Build the parent table
    top_table_str = parent_tbl.generate_table(parent_table_data)
    ansi_print(top_table_str)
if __name__ == '__main__':
    # Default to terminal mode so redirecting to a file won't include the ANSI style sequences
    ansi.allow_style = ansi.AllowStyle.TERMINAL
    basic_tables()
    nested_tables()
| mit | 5c609dc83ced15a6955a7cd636742fd1 | 38.607143 | 124 | 0.668801 | 3.42919 | false | false | false | false |
pydoit/doit | doit/action.py | 1 | 19895 | """Implements actions used by doit tasks
"""
import os
import sys
import subprocess
import io
from io import StringIO
import inspect
from pathlib import PurePath
from threading import Thread
import pdb
from .exceptions import InvalidTask, TaskFailed, TaskError
def normalize_callable(ref):
    """Return ``ref`` normalized to the list form ``[callable, args, kwargs]``.

    ``ref`` may be a bare callable or a ``(callable, args, kwargs)`` tuple;
    a bare callable is paired with empty args/kwargs.
    """
    return list(ref) if isinstance(ref, tuple) else [ref, (), {}]
# Actions
class BaseAction(object):
    """Base class for all actions"""

    # subclasses must implement:
    # def execute(self, out=None, err=None)

    @staticmethod
    def _prepare_kwargs(task, func, args, kwargs):
        """
        Prepare keyword arguments (targets, dependencies, changed,
        cmd line options)
        Inspect python callable and add missing arguments:
        - that the callable expects
        - have not been passed (as a regular arg or as keyword arg)
        - are available internally through the task object

        :param task: (Task) the task this action belongs to; may be None
        :param func: the python callable that will be invoked
        :param args: positional args the action was defined with
        :param kwargs: keyword args the action was defined with
        :return: dict of keyword arguments to call ``func`` with
        :raises InvalidTask: if a reserved parameter declares a default value
        """
        # Return just what was passed in task generator
        # dictionary if the task isn't available
        if not task:
            return kwargs

        func_sig = inspect.signature(func)
        sig_params = func_sig.parameters.values()
        func_has_kwargs = any(p.kind == p.VAR_KEYWORD for p in sig_params)

        # use task meta information as extra_args
        # lambdas defer evaluation until the argument is actually needed
        meta_args = {
            'task': lambda: task,
            'targets': lambda: list(task.targets),
            'dependencies': lambda: list(task.file_dep),
            'changed': lambda: list(task.dep_changed),
        }

        # start with dict passed together on action definition
        kwargs = kwargs.copy()
        bound_args = func_sig.bind_partial(*args)

        # add meta_args
        for key in meta_args.keys():
            # check key is a positional parameter
            if key in func_sig.parameters:
                sig_param = func_sig.parameters[key]

                # it is forbidden to use default values for this arguments
                # because the user might be unaware of this magic.
                if (sig_param.default != sig_param.empty):
                    msg = (f"Task {task.name}, action {func.__name__}():"
                           f"The argument '{key}' is not allowed to have "
                           "a default value (reserved by doit)")
                    raise InvalidTask(msg)

                # if value not taken from position parameter
                if key not in bound_args.arguments:
                    kwargs[key] = meta_args[key]()

        # add tasks parameter options
        opt_args = dict(task.options)
        if task.pos_arg is not None:
            opt_args[task.pos_arg] = task.pos_arg_val

        for key in opt_args.keys():
            # check key is a positional parameter
            if key in func_sig.parameters:
                # if value not taken from position parameter
                if key not in bound_args.arguments:
                    kwargs[key] = opt_args[key]
            # if function has **kwargs include extra_arg on it
            elif func_has_kwargs and key not in kwargs:
                kwargs[key] = opt_args[key]
        return kwargs
class CmdAction(BaseAction):
    """
    Command line action. Spawns a new process.

    @ivar action(str,list,callable): subprocess command string or string list,
         see subprocess.Popen first argument.
         It may also be a callable that generates the command string.
         Strings may contain python mappings with the keys: dependencies,
         changed and targets. ie. "zip %(targets)s %(changed)s"
    @ivar task(Task): reference to task that contains this action
    @ivar save_out: (str) name used to save output in `values`
    @ivar shell: use shell to execute command
         see subprocess.Popen `shell` attribute
    @ivar encoding (str): encoding of the process output
    @ivar decode_error (str): value for decode() `errors` param
                              while decoding process output
    @ivar pkwargs: Popen arguments except 'stdout' and 'stderr'
    """

    STRING_FORMAT = 'old'

    def __init__(self, action, task=None, save_out=None, shell=True,
                 encoding='utf-8', decode_error='replace', buffering=0,
                 **pkwargs):  # pylint: disable=W0231
        '''
        :ivar buffering: (int) stdout/stderr buffering.
               Not to be confused with subprocess buffering
               - 0 -> line buffering
               - positive int -> number of bytes
        '''
        # stdout/stderr are managed internally, so callers may not set them
        for forbidden in ('stdout', 'stderr'):
            if forbidden in pkwargs:
                msg = "CmdAction can't take param named '{0}'."
                raise InvalidTask(msg.format(forbidden))
        self._action = action
        self.task = task
        self.out = None
        self.err = None
        self.result = None
        self.values = {}
        self.save_out = save_out
        self.shell = shell
        self.encoding = encoding
        self.decode_error = decode_error
        self.pkwargs = pkwargs
        self.buffering = buffering

    @property
    def action(self):
        """Return the command; resolve it first if it was given as a callable."""
        if isinstance(self._action, (str, list)):
            return self._action
        else:
            # action can be a callable that returns a string command
            ref, args, kw = normalize_callable(self._action)
            kwargs = self._prepare_kwargs(self.task, ref, args, kw)
            return ref(*args, **kwargs)

    def _print_process_output(self, process, input_, capture, realtime):
        """Reads 'input_' until process is terminated.
        Writes 'input_' content to 'capture' (string)
        and 'realtime' stream

        :param process: the subprocess.Popen object (terminated on decode error)
        :param input_: binary stream to read from (the process's pipe)
        :param capture: StringIO that accumulates everything read
        :param realtime: optional stream echoed to as data arrives
        """
        if self.buffering:
            read = lambda: input_.read(self.buffering)
        else:
            # line buffered
            read = lambda: input_.readline()
        while True:
            try:
                line = read().decode(self.encoding, self.decode_error)
            except Exception:
                # happens when fails to decoded input
                process.terminate()
                input_.read()
                raise
            if not line:
                break
            capture.write(line)
            if realtime:
                realtime.write(line)
                realtime.flush()  # required if on byte buffering mode

    def execute(self, out=None, err=None):
        """
        Execute command action

        both stdout and stderr from the command are captured and saved
        on self.out/err. Real time output is controlled by parameters

        @param out: None - no real time output
                    a file like object (has write method)
        @param err: idem
        @return failure:
            - None: if successful
            - TaskError: If subprocess return code is greater than 125
            - TaskFailed: If subprocess return code isn't zero (and
              not greater than 125)
        """
        try:
            action = self.expand_action()
        except Exception as exc:
            return TaskError(
                "CmdAction Error creating command string", exc)

        # set environ to change output buffering
        subprocess_pkwargs = self.pkwargs.copy()
        env = None
        if 'env' in subprocess_pkwargs:
            env = subprocess_pkwargs['env']
            del subprocess_pkwargs['env']
        if self.buffering:
            if not env:
                env = os.environ.copy()
            env['PYTHONUNBUFFERED'] = '1'

        capture_io = self.task.io.capture if self.task else True
        # BUGFIX: the devnull handle was previously leaked (opened but never
        # closed). Keep a reference so it can be closed after the process ends.
        devnull = None
        if capture_io:
            p_out = p_err = subprocess.PIPE
        else:
            if capture_io is False:
                p_out = out
                p_err = err
            else:  # None -> discard output entirely
                devnull = open(os.devnull, "w")
                p_out = p_err = devnull

        try:
            # spawn task process
            process = subprocess.Popen(
                action,
                shell=self.shell,
                # bufsize=2, # ??? no effect use PYTHONUNBUFFERED instead
                stdout=p_out,
                stderr=p_err,
                env=env,
                **subprocess_pkwargs)

            if capture_io:
                output = StringIO()
                errput = StringIO()
                # drain stdout and stderr concurrently to avoid pipe deadlock
                t_out = Thread(target=self._print_process_output,
                               args=(process, process.stdout, output, out))
                t_err = Thread(target=self._print_process_output,
                               args=(process, process.stderr, errput, err))
                t_out.start()
                t_err.start()
                t_out.join()
                t_err.join()

                self.out = output.getvalue()
                self.err = errput.getvalue()
                self.result = self.out + self.err

            # make sure process really terminated
            process.wait()
        finally:
            if devnull is not None:
                devnull.close()

        # task error - based on:
        # http://www.gnu.org/software/bash/manual/bashref.html#Exit-Status
        # it doesnt make so much difference to return as Error or Failed anyway
        if process.returncode > 125:
            return TaskError("Command error: '%s' returned %s" %
                             (action, process.returncode))

        # task failure
        if process.returncode != 0:
            return TaskFailed("Command failed: '%s' returned %s" %
                              (action, process.returncode))

        # save stdout in values
        if self.save_out:
            self.values[self.save_out] = self.out

    def expand_action(self):
        """Expand action using task meta informations if action is a string.
        Convert `Path` elements to `str` if action is a list.

        @returns: string -> expanded string if action is a string
                  list - string -> expanded list of command elements
        """
        if not self.task:
            return self.action

        # cant expand keywords if action is a list of strings
        if isinstance(self.action, list):
            action = []
            for element in self.action:
                if isinstance(element, str):
                    action.append(element)
                elif isinstance(element, PurePath):
                    action.append(str(element))
                else:
                    msg = ("%s. CmdAction element must be a str "
                           "or Path from pathlib. Got '%r' (%s)")
                    raise InvalidTask(
                        msg % (self.task.name, element, type(element)))
            return action

        subs_dict = {
            'targets': " ".join(self.task.targets),
            'dependencies': " ".join(self.task.file_dep),
        }

        # dep_changed is set on get_status()
        # Some commands (like `clean` also uses expand_args but do not
        # uses get_status, so `changed` is not available.
        if self.task.dep_changed is not None:
            subs_dict['changed'] = " ".join(self.task.dep_changed)

        # task option parameters
        subs_dict.update(self.task.options)
        # convert positional parameters from list space-separated string
        if self.task.pos_arg:
            if self.task.pos_arg_val:
                pos_val = ' '.join(self.task.pos_arg_val)
            else:
                pos_val = ''
            subs_dict[self.task.pos_arg] = pos_val

        if self.STRING_FORMAT == 'old':
            return self.action % subs_dict
        elif self.STRING_FORMAT == 'new':
            return self.action.format(**subs_dict)
        else:
            assert self.STRING_FORMAT == 'both'
            return self.action.format(**subs_dict) % subs_dict

    def __str__(self):
        return "Cmd: %s" % self._action

    def __repr__(self):
        return "<CmdAction: '%s'>" % str(self._action)
class Writer(object):
    """Fan-out stream: forwards every write/flush to N underlying streams.

    This is used on python-actions so output can go to the terminal and be
    captured at the same time.
    """

    def __init__(self, *writers):
        """@param writers - file stream like objects"""
        self.writers = []
        self.orig_stream = None  # the original stream terminal/file
        for writer in writers:
            self.add_writer(writer)

    def add_writer(self, stream, *, is_original=False):
        """adds a stream to the list of writers

        @param is_original: (bool) mark this stream as the real terminal/file,
                            used to answer isatty()/fileno()
        """
        self.writers.append(stream)
        if is_original:
            self.orig_stream = stream

    def write(self, text):
        """write 'text' to all streams"""
        for target in self.writers:
            target.write(text)

    def flush(self):
        """flush all streams"""
        for target in self.writers:
            target.flush()

    def isatty(self):
        return self.orig_stream.isatty() if self.orig_stream else False

    def fileno(self):
        if self.orig_stream is None:
            raise io.UnsupportedOperation()
        return self.orig_stream.fileno()
class PythonAction(BaseAction):
    """Python action. Execute a python callable.
    @ivar py_callable: (callable) Python callable
    @ivar args: (sequence) Extra arguments to be passed to py_callable
    @ivar kwargs: (dict) Extra keyword arguments to be passed to py_callable
    @ivar task(Task): reference to task that contains this action
    @ivar pm_pdb: if True drop into PDB on exception when executing task
    """
    pm_pdb = False
    def __init__(self, py_callable, args=None, kwargs=None, task=None):
        """Store the callable plus extra arguments, rejecting invalid input.
        @raise InvalidTask: if py_callable is not a plain callable, or
               args/kwargs have the wrong container type.
        """
        # pylint: disable=W0231
        self.py_callable = py_callable
        self.task = task
        # out/err/result/values are populated by execute()
        self.out = None
        self.err = None
        self.result = None
        self.values = {}
        if args is None:
            self.args = []
        else:
            self.args = args
        if kwargs is None:
            self.kwargs = {}
        else:
            self.kwargs = kwargs
        # check valid parameters
        if not hasattr(self.py_callable, '__call__'):
            msg = "%r PythonAction must be a 'callable' got %r."
            raise InvalidTask(msg % (self.task, self.py_callable))
        if inspect.isclass(self.py_callable):
            msg = "%r PythonAction can not be a class got %r."
            raise InvalidTask(msg % (self.task, self.py_callable))
        if inspect.isbuiltin(self.py_callable):
            msg = "%r PythonAction can not be a built-in got %r."
            raise InvalidTask(msg % (self.task, self.py_callable))
        # exact type checks (not isinstance) -- subclasses are rejected on purpose?
        # NOTE(review): looks intentional, but confirm against project history.
        if type(self.args) is not tuple and type(self.args) is not list:
            msg = "%r args must be a 'tuple' or a 'list'. got '%s'."
            raise InvalidTask(msg % (self.task, self.args))
        if type(self.kwargs) is not dict:
            msg = "%r kwargs must be a 'dict'. got '%s'"
            raise InvalidTask(msg % (self.task, self.kwargs))
    def _prepare_kwargs(self):
        # Delegate to BaseAction to resolve "magic" keyword arguments based on
        # the callable's signature and the owning task.
        return BaseAction._prepare_kwargs(self.task, self.py_callable,
                                          self.args, self.kwargs)
    def execute(self, out=None, err=None):
        """Execute command action
        both stdout and stderr from the command are captured and saved
        on self.out/err. Real time output is controlled by parameters
        @param out: None - no real time output
                    a file like object (has write method)
        @param err: idem
        @return failure: see CmdAction.execute
        """
        # when there is no task attached, default to capturing I/O
        capture_io = self.task.io.capture if self.task else True
        if capture_io:
            # set std stream
            old_stdout = sys.stdout
            output = StringIO()
            out_writer = Writer()
            # capture output but preserve isatty() from original stream
            out_writer.add_writer(output)
            if out:
                out_writer.add_writer(out, is_original=True)
            sys.stdout = out_writer
            old_stderr = sys.stderr
            errput = StringIO()
            err_writer = Writer()
            err_writer.add_writer(errput)
            if err:
                err_writer.add_writer(err, is_original=True)
            sys.stderr = err_writer
        else:
            # no capture: just redirect the std streams if replacements given
            if out:
                old_stdout = sys.stdout
                sys.stdout = out
            if err:
                old_stderr = sys.stderr
                sys.stderr = err
        kwargs = self._prepare_kwargs()
        # execute action / callable
        try:
            returned_value = self.py_callable(*self.args, **kwargs)
        except Exception as exception:
            if self.pm_pdb: # pragma: no cover
                # start post-mortem debugger
                deb = pdb.Pdb(stdin=sys.__stdin__, stdout=sys.__stdout__)
                deb.reset()
                deb.interaction(None, sys.exc_info()[2])
            return TaskError("PythonAction Error", exception)
        finally:
            # restore std streams /log captured streams
            if capture_io:
                sys.stdout = old_stdout
                sys.stderr = old_stderr
                self.out = output.getvalue()
                self.err = errput.getvalue()
            else:
                if out:
                    sys.stdout = old_stdout
                if err:
                    sys.stderr = old_stderr
        # if callable returns false. Task failed
        if returned_value is False:
            return TaskFailed("Python Task failed: '%s' returned %s" %
                              (self.py_callable, returned_value))
        elif returned_value is True or returned_value is None:
            pass
        elif isinstance(returned_value, str):
            self.result = returned_value
        elif isinstance(returned_value, dict):
            # dict return feeds both computed values and the result
            self.values = returned_value
            self.result = returned_value
        elif isinstance(returned_value, (TaskFailed, TaskError)):
            return returned_value
        else:
            return TaskError("Python Task error: '%s'. It must return:\n"
                             "False for failed task.\n"
                             "True, None, string or dict for successful task\n"
                             "returned %s (%s)" %
                             (self.py_callable, returned_value,
                              type(returned_value)))
    def __str__(self):
        # get object description excluding runtime memory address
        return "Python: %s" % str(self.py_callable)[1:].split(' at ')[0]
    def __repr__(self):
        """Return an unambiguous representation for debugging."""
        return "<PythonAction: '%s'>" % (repr(self.py_callable))
def create_action(action, task_ref, param_name):
    """Build the appropriate action object for the given parameter value.

    @param action: value to wrap
    @type action: L{BaseAction} subclass object, str, list, tuple or callable
    @param task_ref: Task object this action belongs to
    @param param_name: str, name of task param. i.e actions, teardown, clean
    @raise InvalidTask: if the action parameter type is not supported
    """
    if isinstance(action, BaseAction):
        action.task = task_ref
        return action
    if isinstance(action, str):
        return CmdAction(action, task_ref, shell=True)
    if isinstance(action, list):
        return CmdAction(action, task_ref, shell=False)
    if isinstance(action, tuple):
        if len(action) > 3:
            raise InvalidTask(
                "Task '{}': invalid '{}' tuple length. got: {!r} {}".format(
                    task_ref.name, param_name, action, type(action)))
        # pad missing (args, kwargs) entries with None
        padded = list(action) + [None] * (3 - len(action))
        return PythonAction(padded[0], padded[1], padded[2], task_ref)
    if callable(action):
        return PythonAction(action, task=task_ref)
    raise InvalidTask(
        "Task '{}': invalid '{}' type. got: {!r} {}".format(
            task_ref.name, param_name, action, type(action)))
| mit | d8a94923119cc4437033a552839886cd | 34.782374 | 79 | 0.55813 | 4.399602 | false | false | false | false |
pydoit/doit | doit/exceptions.py | 1 | 3303 | """Handle exceptions generated from 'user' code"""
import sys
import traceback
class InvalidCommand(Exception):
    """Invalid command line argument."""
    def __init__(self, *args, **kwargs):
        # optional keyword: the parameter name that could not be resolved
        self.not_found = kwargs.pop('not_found', None)
        super(InvalidCommand, self).__init__(*args, **kwargs)
        self.cmd_used = None
        self.bin_name = 'doit'  # default, callers may overwrite it
    def __str__(self):
        """Build the message lazily so bin_name/cmd_used can be set late."""
        if self.not_found is None:
            return super(InvalidCommand, self).__str__()
        subs = {'not_found': self.not_found,
                'cmd_used': self.cmd_used,
                'bin_name': self.bin_name}
        if self.cmd_used:
            template = (
                'command `{cmd_used}` invalid parameter: "{not_found}".'
                ' Must be a task, or a target.\n'
                'Type "{bin_name} list" to see available tasks')
        else:
            template = (
                'Invalid parameter: "{not_found}".'
                ' Must be a command, task, or a target.\n'
                'Type "{bin_name} help" to see available commands.\n'
                'Type "{bin_name} list" to see available tasks.\n')
        return template.format(**subs)
# User-facing configuration errors (raised while loading/validating tasks).
class InvalidDodoFile(Exception):
    """Invalid dodo file"""
    pass
class InvalidTask(Exception):
    """Invalid task instance. User error on specifying the task."""
    pass
class CatchedException():
    """DEPRECATED, use BaseFail instead. 2022-04-22 0.36.0 release.
    Kept for backwards compatibility: the name has wrong grammar and not
    every BaseFail actually contains an Exception.
    :ivar report: used by (some) reporters to decide if Failure/Error should be printed
    """
    def __init__(self, msg, exception=None, report=True):
        self.message = msg
        self.traceback = ''
        self.report = report
        # It would be nice to include original exception, but they are not always picklable
        # https://stackoverflow.com/questions/49715881/how-to-pickle-inherited-exceptions
        if isinstance(exception, BaseFail):
            # re-wrapping another failure: reuse its already-formatted traceback
            self.traceback = exception.traceback
        elif exception is not None:
            # NOTE(review): uses sys.exc_info()[2], so this assumes __init__ is
            # called from inside the `except` block handling `exception`.
            self.traceback = traceback.format_exception(
                exception.__class__, exception, sys.exc_info()[2])
    def get_msg(self):
        """return full exception description (includes traceback)"""
        return "%s\n%s" % (self.message, "".join(self.traceback))
    def get_name(self):
        """get fail kind name (the concrete class name)"""
        return self.__class__.__name__
    def __repr__(self):
        return "(<%s> %s)" % (self.get_name(), self.message)
    def __str__(self):
        return "%s\n%s" % (self.get_name(), self.get_msg())
class BaseFail(CatchedException):
    """This used to save info Task failures/errors
    Might contain a caught Exception.
    """
    # Inherits all behavior from the deprecated CatchedException name.
    pass
# Concrete failure kinds; reporters distinguish them by class name.
class TaskFailed(BaseFail):
    """Task execution was not successful."""
    pass
class TaskError(BaseFail):
    """Error while trying to execute task."""
    pass
# TaskError specializations for specific phases of task processing.
class UnmetDependency(TaskError):
    """Task was not executed because a dependent task failed or is ignored"""
    pass
class SetupError(TaskError):
    """Error while trying to execute setup object"""
    pass
class DependencyError(TaskError):
    """Error while trying to check if task is up-to-date or saving task status"""
    pass
| mit | d1d8824272064c9d70e5aacb521dd759 | 29.027273 | 90 | 0.61096 | 4.077778 | false | false | false | false |
pydoit/doit | tests/test_cmd_clean.py | 1 | 6519 | from io import StringIO
from unittest import mock
import pytest
from doit.exceptions import InvalidCommand
from doit.task import Task
from doit.cmd_clean import Clean
from .conftest import CmdFactory
class TestCmdClean(object):
    """Tests for the `doit clean` command (doit.cmd_clean.Clean)."""
    @pytest.fixture
    def tasks(self, request):
        # self.cleaned records the order in which clean actions ran
        self.cleaned = []
        def myclean(name):
            self.cleaned.append(name)
        return [
            Task("t1", None, targets=['t1.out'], setup=['t2'],
                 clean=[(myclean,('t1',))]),
            Task("t2", None, clean=[(myclean,('t2',))]),
            Task("t3", None, task_dep=['t3:a'], has_subtask=True,
                 clean=[(myclean,('t3',))]),
            Task("t3:a", None, clean=[(myclean,('t3:a',))], subtask_of='t3'),
            Task("t4", None, file_dep=['t1.out'], clean=[(myclean,('t4',))] ),
        ]
    def test_clean_all(self, tasks):
        """--clean-all cleans every task (implies --clean-dep ordering)."""
        output = StringIO()
        cmd_clean = CmdFactory(Clean, outstream=output, task_list=tasks,
                               dep_manager=mock.MagicMock())
        cmd_clean._execute(dryrun=False, cleandep=False, cleanall=True,
                           cleanforget=False)
        # all enables --clean-dep
        assert ['t4', 't1', 't2', 't3', 't3:a'] == self.cleaned
    def test_clean_default_all(self, tasks):
        """Without selected tasks, default behaves like --clean-all."""
        output = StringIO()
        cmd_clean = CmdFactory(Clean, outstream=output, task_list=tasks,
                               dep_manager=mock.MagicMock())
        cmd_clean._execute(dryrun=False, cleandep=False, cleanall=False,
                           cleanforget=False)
        # default enables --clean-dep
        assert ['t4', 't1', 't2', 't3', 't3:a'] == self.cleaned
    def test_clean_default(self, tasks):
        """Default selection cleans the selected task and its setup deps."""
        output = StringIO()
        cmd_clean = CmdFactory(
            Clean, outstream=output, task_list=tasks,
            sel_tasks=['t1'], dep_manager=mock.MagicMock())
        cmd_clean._execute(dryrun=False, cleandep=False, cleanall=False,
                           cleanforget=False)
        # default enables --clean-dep
        assert ['t1', 't2'] == self.cleaned
    def test_clean_selected(self, tasks):
        """Explicit positional args clean only the named task."""
        output = StringIO()
        mock_dep_manager = mock.MagicMock()
        cmd_clean = CmdFactory(
            Clean, outstream=output, task_list=tasks,
            sel_tasks=['t1'], dep_manager=mock_dep_manager)
        cmd_clean._execute(dryrun=False, cleandep=False, cleanall=False,
                           cleanforget=False, pos_args=['t2'])
        assert ['t2'] == self.cleaned
        mock_dep_manager.remove.assert_not_called()
    def test_clean_selected_wildcard(self, tasks):
        """A wildcard positional arg expands to all matching tasks."""
        output = StringIO()
        mock_dep_manager = mock.MagicMock()
        cmd_clean = CmdFactory(
            Clean, outstream=output, task_list=tasks,
            dep_manager=mock_dep_manager)
        cmd_clean._execute(dryrun=False, cleandep=False, cleanall=False,
                           cleanforget=False, pos_args=['t3*'])
        assert ['t3', 't3:a'] == self.cleaned
        mock_dep_manager.remove.assert_not_called()
    def test_clean_taskdep(self, tasks):
        """--clean-dep also cleans the task's setup dependencies."""
        output = StringIO()
        mock_dep_manager = mock.MagicMock()
        cmd_clean = CmdFactory(Clean, outstream=output, task_list=tasks,
                               dep_manager=mock_dep_manager)
        cmd_clean._execute(dryrun=False, cleandep=True, cleanall=False,
                           cleanforget=False, pos_args=['t1'])
        assert ['t1', 't2'] == self.cleaned
        mock_dep_manager.remove.assert_not_called()
    def test_clean_taskdep_recursive(self, tasks):
        """--clean-dep follows file_dep -> target chains transitively."""
        output = StringIO()
        cmd_clean = CmdFactory(Clean, outstream=output, task_list=tasks,
                               dep_manager=mock.MagicMock())
        cmd_clean._execute(dryrun=False, cleandep=True, cleanall=False,
                           cleanforget=False, pos_args=['t4'])
        assert ['t4', 't1', 't2'] == self.cleaned
    def test_clean_subtasks(self, tasks):
        """Cleaning a task with subtasks also cleans the subtasks."""
        output = StringIO()
        cmd_clean = CmdFactory(Clean, outstream=output, task_list=tasks,
                               dep_manager=mock.MagicMock())
        cmd_clean._execute(dryrun=False, cleandep=False, cleanall=False,
                           cleanforget=False, pos_args=['t3'])
        assert ['t3', 't3:a'] == self.cleaned
    def test_clean_taskdep_once(self, tasks):
        # do not execute clean operation more than once
        output = StringIO()
        cmd_clean = CmdFactory(Clean, outstream=output, task_list=tasks,
                               dep_manager=mock.MagicMock())
        cmd_clean._execute(dryrun=False, cleandep=True, cleanall=False,
                           cleanforget=False, pos_args=['t1', 't2'])
        assert ['t1', 't2'] == self.cleaned
    def test_clean_invalid_task(self, tasks):
        """Unknown task names raise InvalidCommand."""
        output = StringIO()
        cmd_clean = CmdFactory(Clean, outstream=output, task_list=tasks,
                               sel_tasks=['t1'])
        pytest.raises(InvalidCommand, cmd_clean._execute,
                      dryrun=False, cleandep=False, cleanall=False,
                      cleanforget=False, pos_args=['xxxx'])
    def test_clean_forget_selected(self, tasks):
        """--forget removes dep-manager state for exactly the cleaned tasks."""
        output = StringIO()
        mock_dep_manager = mock.MagicMock()
        cmd_clean = CmdFactory(
            Clean, outstream=output, task_list=tasks,
            sel_tasks=['t1'], dep_manager=mock_dep_manager)
        cmd_clean._execute(dryrun=False, cleandep=False, cleanall=False,
                           cleanforget=True, pos_args=['t2'])
        assert ['t2'] == self.cleaned
        # order
        mock_dep_manager.assert_has_calls(
            [mock.call.remove(mock.ANY), mock.call.close()])
        # exactly t2, not more
        assert (mock_dep_manager.remove.call_args_list ==
                [mock.call('t2')])
    def test_clean_forget_taskdep(self, tasks):
        """--forget with --clean-dep forgets the dependencies too."""
        output = StringIO()
        mock_dep_manager = mock.MagicMock()
        cmd_clean = CmdFactory(Clean, outstream=output, task_list=tasks,
                               dep_manager=mock_dep_manager)
        cmd_clean._execute(dryrun=False, cleandep=True, cleanall=False,
                           cleanforget=True, pos_args=['t1'])
        assert ['t1', 't2'] == self.cleaned
        # order
        mock_dep_manager.assert_has_calls(
            [mock.call.remove(mock.ANY), mock.call.close()])
        assert (mock_dep_manager.remove.call_args_list ==
                [mock.call('t1'), mock.call('t2')])
| mit | a83f071c60c36880de18fe026b85de80 | 42.751678 | 78 | 0.56435 | 3.796738 | false | true | false | false |
grow/grow | grow/translators/google_sheets_test.py | 1 | 19110 | """Tests for Google Sheets translations."""
import unittest
import mock
from nose.plugins import skip
from googleapiclient import errors
from grow.preprocessors import google_drive
from grow.common import oauth
from grow.pods import pods
from grow import storage
from grow.testing import google_service
from grow.testing import testing
from grow.translators import errors as translator_errors
from . import google_sheets
class GoogleSheetsTranslatorTestCase(unittest.TestCase):
    """Integration test that talks to the real Google Sheets API."""
    def setUp(self):
        dir_path = testing.create_test_pod_dir()
        self.pod = pods.Pod(dir_path, storage=storage.FileStorage)
    def test_upload_translations(self):
        """Uploads 'de' translations; skipped when no OAuth credentials exist."""
        credentials, _ = oauth.get_credentials_and_storage(
            scope=google_sheets.OAUTH_SCOPE,
            storage_key=google_drive.STORAGE_KEY)
        if not credentials:
            text = ('Skipping Google Sheets Translator test'
                    ' because we don\'t have auth keys. Run'
                    ' `grow upload_translations` or `grow download_translations`'
                    ' to acquire auth keys and re-run the test.')
            raise skip.SkipTest(text)
        translator = self.pod.get_translator('google_sheets')
        translator.upload(locales=['de'])
class GoogleSheetsTranslatorMockTestCase(unittest.TestCase):
    """Tests the Google Sheets translator against mocked API services."""
    def _setup_mocks(self, sheets_create=None, sheets_get=None, sheets_values=None,
                     sheets_batch_values=None):
        """Build mock Drive/Sheets services preloaded with canned responses."""
        if sheets_create is None:
            sheets_create = {
                'spreadsheetId': '98765',
                'sheets': [
                    {
                        'properties': {
                            'sheetId': '1234',
                            'title': 'es',
                        }
                    }
                ]
            }
        if sheets_get is None:
            sheets_get = {
                'spreadsheetId': 76543,
            }
        mock_drive_service = google_service.GoogleServiceMock.mock_drive_service()
        mock_sheets_service = google_service.GoogleServiceMock.mock_sheets_service(
            create=sheets_create, get=sheets_get, values=sheets_values,
            batch_values=sheets_batch_values)
        return mock_drive_service, mock_sheets_service
    def setUp(self):
        dir_path = testing.create_test_pod_dir()
        self.pod = pods.Pod(dir_path, storage=storage.FileStorage)
    @mock.patch.object(google_sheets.GoogleSheetsTranslator, '_generate_new_sheet_id')
    @mock.patch.object(google_sheets.GoogleSheetsTranslator, '_create_service')
    @mock.patch.object(google_drive.BaseGooglePreprocessor, 'create_service')
    def test_create_sheet(self, mock_service_drive, mock_service_sheets, mock_new_sheet_id):
        """Uploading a new locale creates a sheet and formats its columns."""
        mock_drive_service, mock_sheets_service = self._setup_mocks()
        mock_service_drive.return_value = mock_drive_service['service']
        mock_service_sheets.return_value = mock_sheets_service['service']
        mock_new_sheet_id.side_effect = [765, 654, 543, 432, 321]
        translator = self.pod.get_translator('google_sheets')
        translator.upload(locales=['de'])
        requests = google_service.GoogleServiceMock.get_batch_requests(
            mock_sheets_service['spreadsheets.batchUpdate'])
        # Creates the sheet.
        self.assertIn({
            'addSheet': {
                'properties': {
                    'gridProperties': {
                        'columnCount': 4,
                        'rowCount': 2,
                        'frozenColumnCount': 1,
                        'frozenRowCount': 1
                    },
                    'sheetId': 765,
                    'title': 'de'
                }
            }
        }, requests)
        # Formats the columns.
        self.assertIn({
            'updateDimensionProperties': {
                'fields': 'pixelSize',
                'range': {
                    'endIndex': 3,
                    'startIndex': 0,
                    'sheetId': 765,
                    'dimension': 'COLUMNS'
                },
                'properties': {
                    'pixelSize': 400
                }
            }
        }, requests)
        self.assertIn({
            'updateDimensionProperties': {
                'fields': 'pixelSize',
                'range': {
                    'endIndex': 4,
                    'startIndex': 3,
                    'sheetId': 765,
                    'dimension': 'COLUMNS'
                },
                'properties': {
                    'pixelSize': 200
                }
            }
        }, requests)
        self.assertIn({
            'updateDimensionProperties': {
                'fields': 'hiddenByUser',
                'range': {
                    'endIndex': 3,
                    'startIndex': 2,
                    'sheetId': 765,
                    'dimension': 'COLUMNS'
                },
                'properties': {
                    'hiddenByUser': False
                }
            }
        }, requests)
    @mock.patch.object(google_sheets.GoogleSheetsTranslator, '_create_service')
    @mock.patch.object(google_drive.BaseGooglePreprocessor, 'create_service')
    def test_download(self, mock_service_drive, mock_service_sheets):
        """Downloading reads values from the sheet and updates stats."""
        mock_drive_service, mock_sheets_service = self._setup_mocks(sheets_get={
            'spreadsheetId': 'A1B2C3D4E5F6',
            'sheets': [{
                'properties': {
                    'title': 'de',
                    'sheetId': 765,
                },
            }]
        }, sheets_batch_values={
            'valueRanges': [
                {
                    'values': [
                        ['en', 'es'],
                        ['jimbo', 'jimmy'],
                        [],
                        [''],
                        ['suzette', 'sue'],
                    ],
                },
            ],
        })
        mock_service_drive.return_value = mock_drive_service['service']
        mock_service_sheets.return_value = mock_sheets_service['service']
        translator = self.pod.get_translator('google_sheets')
        self.pod.write_yaml(translator.TRANSLATOR_STATS_PATH, {
            'google_sheets': {
                'de': {
                    'ident': 'A1B2C3D4E5F6',
                    'source_lang': 'en',
                    'uploaded': '2017-06-02T13:17:57.727879',
                    'url': 'https://docs.google.com/spreadsheets/d/A1B2C3D4E5F6#gid=12345',
                },
            },
        })
        new_stats = translator.download(locales=['de'])
        new_stat = new_stats[0]
        self.assertEqual('en', new_stat.source_lang)
        self.assertEqual('de', new_stat.lang)
        self.assertEqual('A1B2C3D4E5F6', new_stat.ident)
    @mock.patch.object(google_sheets.GoogleSheetsTranslator, '_create_service')
    @mock.patch.object(google_drive.BaseGooglePreprocessor, 'create_service')
    def test_download_error(self, mock_service_drive, mock_service_sheets):
        """An HTTP error during download surfaces as NotFoundError."""
        mock_drive_service, mock_sheets_service = self._setup_mocks(sheets_get={
            'spreadsheetId': 'A1B2C3D4E5F6',
            'sheets': [{
                'properties': {
                    'title': 'de',
                    'sheetId': 765,
                },
            }]
        })
        mock_service_drive.return_value = mock_drive_service['service']
        mock_service_sheets.return_value = mock_sheets_service['service']
        mock_sheets_service['spreadsheets.values.batchGet'].execute.side_effect = errors.HttpError(
            {'status': '400'}, bytes('', 'utf-8'))
        translator = self.pod.get_translator('google_sheets')
        self.pod.write_yaml(translator.TRANSLATOR_STATS_PATH, {
            'google_sheets': {
                'de': {
                    'ident': 'A1B2C3D4E5F6',
                    'source_lang': 'en',
                    'uploaded': '2017-06-02T13:17:57.727879',
                    'url': 'https://docs.google.com/spreadsheets/d/A1B2C3D4E5F6#gid=12345',
                },
            },
        })
        with self.assertRaises(translator_errors.NotFoundError):
            translator.download(locales=['de'])
        # Error is caught by the base download.
        self.assertTrue(
            mock_sheets_service['spreadsheets.values.batchGet'].execute.called)
    @mock.patch.object(google_sheets.GoogleSheetsTranslator, '_create_service')
    @mock.patch.object(google_drive.BaseGooglePreprocessor, 'create_service')
    def test_update_meta(self, mock_service_drive, mock_service_sheets):
        """update_meta issues all the formatting/protection batch requests."""
        mock_drive_service, mock_sheets_service = self._setup_mocks(sheets_get={
            'spreadsheetId': 'A1B2C3D4E5F6',
            'sheets': [{
                'properties': {
                    'title': 'de',
                    'sheetId': 765,
                },
            }]
        })
        mock_service_drive.return_value = mock_drive_service['service']
        mock_service_sheets.return_value = mock_sheets_service['service']
        translator = self.pod.get_translator('google_sheets')
        self.pod.write_yaml(translator.TRANSLATOR_STATS_PATH, {
            'google_sheets': {
                'de': {
                    'ident': 'A1B2C3D4E5F6',
                    'source_lang': 'en',
                    'uploaded': '2017-06-02T13:17:57.727879',
                    'url': 'https://docs.google.com/spreadsheets/d/A1B2C3D4E5F6#gid=12345',
                },
            },
        })
        translator.update_meta(locales=['de'])
        requests = google_service.GoogleServiceMock.get_batch_requests(
            mock_sheets_service['spreadsheets.batchUpdate'])
        # Style the header cells.
        self.assertIn({
            'repeatCell': {
                'cell': {
                    'userEnteredFormat': {
                        'textFormat': {
                            'bold': True
                        },
                        'backgroundColor': {
                            'blue': 0.933,
                            'green': 0.933,
                            'red': 0.933
                        }
                    }
                },
                'fields': 'userEnteredFormat',
                'range': {
                    'endRowIndex': 1,
                    'startRowIndex': 0,
                    'sheetId': 765,
                    'startColumnIndex': 0
                }
            }
        }, requests)
        # Wrap translations and comments.
        self.assertIn({
            'repeatCell': {
                'cell': {
                    'userEnteredFormat': {
                        'wrapStrategy': 'WRAP'
                    }
                },
                'fields': 'userEnteredFormat',
                'range': {
                    'endColumnIndex': 3,
                    'sheetId': 765,
                    'startColumnIndex': 0,
                    'startRowIndex': 1
                }
            }
        }, requests)
        # Muted syles on locations.
        self.assertIn({
            'repeatCell': {
                'cell': {
                    'userEnteredFormat': {
                        'textFormat': {
                            'foregroundColor': {
                                'blue': 0.6196,
                                'green': 0.6196,
                                'red': 0.6196
                            }
                        },
                        'wrapStrategy': 'WRAP'
                    }
                },
                'fields': 'userEnteredFormat',
                'range': {
                    'endColumnIndex': 4,
                    'sheetId': 765,
                    'startColumnIndex': 2,
                    'startRowIndex': 1
                }
            }
        }, requests)
        # Conditional formatting for missing translations.
        self.assertIn({
            'addConditionalFormatRule': {
                'index': 0,
                'rule': {
                    'ranges': [{
                        'endColumnIndex': 2,
                        'sheetId': 765,
                        'startColumnIndex': 1,
                        'startRowIndex': 1
                    }],
                    'booleanRule': {
                        'condition': {
                            'type': 'BLANK'
                        },
                        'format': {
                            'backgroundColor': {
                                'blue': 0.964,
                                'green': 0.905,
                                'red': 0.929
                            }
                        }
                    }
                }
            }
        }, requests)
        # Protect source values.
        self.assertIn({
            'addProtectedRange': {
                'protectedRange': {
                    'warningOnly': True,
                    'protectedRangeId': 1000766,
                    'range': {
                        'endColumnIndex': 1,
                        'sheetId': 765,
                        'startColumnIndex': 0,
                        'startRowIndex': 1
                    },
                    'description': 'Original strings can only be edited in the source files.'
                }
            }
        }, requests)
        # Protect auto comments.
        self.assertIn({
            'addProtectedRange': {
                'protectedRange': {
                    'warningOnly': True,
                    'protectedRangeId': 1000767,
                    'range': {
                        'endColumnIndex': 3,
                        'sheetId': 765,
                        'startColumnIndex': 2,
                        'startRowIndex': 1
                    },
                    'description': 'Comment strings can only be edited in the source files.'
                }
            }
        }, requests)
        # Protect locations.
        self.assertIn({
            'addProtectedRange': {
                'protectedRange': {
                    'warningOnly': True,
                    'protectedRangeId': 1000768,
                    'range': {
                        'endColumnIndex': 4,
                        'sheetId': 765,
                        'startColumnIndex': 3,
                        'startRowIndex': 1
                    },
                    'description': 'Source strings can only be edited in the source files.'
                }
            }
        }, requests)
        # Filter View for Untranslated Strings.
        self.assertIn({
            'addFilterView': {
                'filter': {
                    'range': {
                        'endColumnIndex': 4,
                        'sheetId': 765,
                        'startColumnIndex': 0,
                        'startRowIndex': 0
                    },
                    'criteria': {
                        '1': {
                            'condition': {
                                'type': 'BLANK'
                            }
                        }
                    },
                    'filterViewId': 3300766,
                    'title': 'Untranslated Strings'
                }
            }
        }, requests)
    @mock.patch.object(google_sheets.GoogleSheetsTranslator, '_create_service')
    @mock.patch.object(google_drive.BaseGooglePreprocessor, 'create_service')
    def test_upload_translations(self, mock_service_drive, mock_service_sheets):
        """Uploading to an existing sheet expands, updates and sorts it."""
        mock_drive_service, mock_sheets_service = self._setup_mocks(sheets_get={
            'spreadsheetId': 'A1B2C3D4E5F6',
            'sheets': [{
                'properties': {
                    'title': 'de',
                    'sheetId': 765,
                },
            }]
        }, sheets_values={
            'values': [
                ['en', 'es'],
                ['jimbo', 'jimmy'],
                [],
                [''],
                ['suzette', 'sue'],
            ],
        })
        mock_service_drive.return_value = mock_drive_service['service']
        mock_service_sheets.return_value = mock_sheets_service['service']
        translator = self.pod.get_translator('google_sheets')
        self.pod.write_yaml(translator.TRANSLATOR_STATS_PATH, {
            'google_sheets': {
                'de': {
                    'ident': 'A1B2C3D4E5F6',
                    'source_lang': 'en',
                    'uploaded': '2017-06-02T13:17:57.727879',
                    'url': 'https://docs.google.com/spreadsheets/d/A1B2C3D4E5F6#gid=12345',
                },
            },
        })
        translator.upload(locales=['de'])
        requests = google_service.GoogleServiceMock.get_batch_requests(
            mock_sheets_service['spreadsheets.batchUpdate'])
        # Expands the column count.
        self.assertIn({
            'appendDimension': {
                'length': 2,
                'sheetId': 765,
                'dimension': 'COLUMNS'
            }
        }, requests)
        # Update the cells.
        self.assertIn({
            'updateCells': {
                'fields': 'userEnteredValue',
                'rows': [{
                    'values': [{
                        'userEnteredValue': {
                            'stringValue': 'jimbo'
                        }
                    }, {
                        'userEnteredValue': {
                            'stringValue': 'jimmy'
                        }
                    }, {
                        'userEnteredValue': {
                            'stringValue': ''
                        }
                    }, {
                        'userEnteredValue': {
                            'stringValue': ''
                        }
                    }]
                }, {
                    'values': [{
                        'userEnteredValue': {
                            'stringValue': 'suzette'
                        }
                    }, {
                        'userEnteredValue': {
                            'stringValue': 'sue'
                        }
                    }, {
                        'userEnteredValue': {
                            'stringValue': ''
                        }
                    }, {
                        'userEnteredValue': {
                            'stringValue': ''
                        }
                    }]
                }],
                'start': {
                    'rowIndex': 1,
                    'columnIndex': 0,
                    'sheetId': 765
                }
            }
        }, requests)
        # Sort the range
        self.assertIn({
            'sortRange': {
                'range': {
                    'startRowIndex': 1,
                    'sheetId': 765,
                    'startColumnIndex': 0
                },
                'sortSpecs': [{
                    'sortOrder': 'ASCENDING',
                    'dimensionIndex': 0
                }]
            }
        }, requests)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| mit | c57396c378938a18f3e74ba4a5b37dc9 | 34.388889 | 99 | 0.425955 | 4.746647 | false | false | false | false |
grow/grow | grow/performance/profile_report.py | 1 | 3158 | """Profiling report for analyizing the performance of the app"""
class ProfileReport:
    """Analyzes the timers to report on the app timing."""
    def __init__(self, profile):
        # Group all timers by key; each group is tracked by a ReportItem.
        self.profile = profile
        self.items = {}
        for timer in self.profile:
            if timer.key not in self.items:
                self.items[timer.key] = ReportItem(timer.key)
            self.items[timer.key].add_timer(timer)
    def analyze(self):
        """Performs an analysis of the timers."""
        # NOTE(review): placeholder -- no analysis is implemented yet.
        pass
    def export(self):
        """Export the timer data for each timer."""
        exported = {}
        for key, item in self.items.items():
            exported[key] = item.export()
        return exported
    def pretty_print(self):
        """Prints out the report in a nice format."""
        for _, item in self.items.items():
            print('{} ({}): Avg {} Min {} Max {} '.format(
                item.key, len(item), item.average_duration, item.min_duration, item.max_duration))
            # Show the slowest individual timers when a key has several.
            if len(item) > 1:
                for timer in item.top():
                    print(timer)
class ReportItem:
    """Report item used to store information about all timers with the same key."""

    def __init__(self, key):
        self.key = key
        self.timers = []
        self.start = None  # Earliest start among tracked timers.
        self.end = None  # Latest end among tracked timers.

    def __len__(self):
        return len(self.timers)

    @property
    def average_duration(self):
        """Calculate the average duration of the timers.

        Raises ZeroDivisionError when no timers have been added.
        """
        return sum(timer.duration for timer in self.timers) / len(self.timers)

    @property
    def max_duration(self):
        """Calculate the maximum duration of the timers (None when empty)."""
        # FIX: the previous `if not value` check treated a legitimate
        # duration of 0 as "unset", corrupting the result.
        return max((timer.duration for timer in self.timers), default=None)

    @property
    def min_duration(self):
        """Calculate the minimum duration of the timers (None when empty)."""
        # FIX: same falsy-zero bug as max_duration -- a 0 duration was
        # overwritten by the next timer's duration.
        return min((timer.duration for timer in self.timers), default=None)

    @property
    def duration(self):
        """Duration from the first start to the last end of the timers."""
        return self.end - self.start

    def add_timer(self, timer):
        """Add a timer to the report item to track it."""
        self.timers.append(timer)
        # Keep track of the first start and last end time.
        # Compare against None explicitly so a start/end of 0 is preserved.
        if self.start is None or timer.start < self.start:
            self.start = timer.start
        if self.end is None or timer.end > self.end:
            self.end = timer.end

    def export(self):
        """Export the timer data for each timer."""
        return {
            'start': self.start,
            'end': self.end,
            'timers': [t.export() for t in self.timers],
        }

    def top(self, count=5, ascending=False):
        """Get the top (or bottom) `count` timers ordered by duration."""
        top_list = sorted(self.timers, key=lambda k: k.duration)
        if not ascending:
            top_list.reverse()
        return top_list[:count]
| mit | 5cba4da7b97aec1d0322c6557aa6e9ca | 29.660194 | 98 | 0.559848 | 4.256065 | false | false | false | false |
grow/grow | grow/sdk/installers/gerrit_installer.py | 1 | 1751 | """Gerrit installer class."""
import subprocess
import urllib.parse
from grow.common import utils
from grow.sdk.installers import base_installer
# Host suffixes that are known to be served by Gerrit.
KNOWN_GERRIT_HOSTS = (
    'googlesource.com',
)
class GerritInstaller(base_installer.BaseInstaller):
    """Gerrit installer."""
    KIND = 'gerrit'
    @property
    def should_run(self):
        """Should the installer run?
        An explicit `gerrit` podspec setting wins; otherwise run when any
        git remote points at a known Gerrit host.
        """
        gerrit_setting = self.config.get('gerrit', None)
        if gerrit_setting is not None:
            return gerrit_setting
        repo = utils.get_git_repo(self.pod.root)
        if repo is None:
            return False
        for remote in repo.remotes:
            url = remote.config_reader.get('url')
            result = urllib.parse.urlparse(url)
            # NOTE(review): endswith is a plain suffix match, so e.g.
            # "evilgooglesource.com" would also match -- confirm intended.
            if result.netloc.endswith(KNOWN_GERRIT_HOSTS):
                return True
        return False
    def install(self):
        """Install dependencies.
        Downloads Gerrit's commit-msg hook (adds Change-Id lines) into the
        repository's hooks directory and makes it executable.
        @raise base_installer.InstallError: when either shell command fails.
        """
        curl_command = (
            'curl -sLo '
            '`git rev-parse --git-dir`/hooks/commit-msg '
            'https://gerrit-review.googlesource.com/tools/hooks/commit-msg')
        process = subprocess.Popen(
            curl_command, **self.subprocess_args(shell=True))
        code = process.wait()
        if not code:
            chmod_command = 'chmod +x `git rev-parse --git-dir`/hooks/commit-msg'
            process = subprocess.Popen(
                chmod_command, **self.subprocess_args(shell=True))
            code = process.wait()
            if not code:
                return
            raise base_installer.InstallError(
                'There was an error running `{}`.'.format(chmod_command))
        raise base_installer.InstallError(
            'There was an error running `{}`.'.format(curl_command))
| mit | 8ba281d78399aaf617a0e30a04a5891a | 31.425926 | 81 | 0.587664 | 4.062645 | false | false | false | false |
grow/grow | grow/sdk/installers/base_installer.py | 1 | 1558 | """Base installer class."""
from grow.sdk import sdk_utils
class Error(Exception):
    """Base class for all installer errors."""

    def __init__(self, message):
        super().__init__(message)
        self.message = message


class MissingPrerequisiteError(Error):
    """Raised when a required prerequisite is unavailable.

    Optionally carries the shell commands that would install it.
    """

    def __init__(self, message, install_commands=None):
        super().__init__(message)
        self.install_commands = install_commands


class InstallError(Error):
    """Raised when the installation itself fails."""
class BaseInstaller:
    """Base class for grow installers.

    Subclasses set ``KIND`` and implement :meth:`install`.
    """

    KIND = None

    def __init__(self, pod, config):
        self.pod = pod
        self.config = config

    @property
    def pre_install_messages(self):
        """Messages shown before the install starts."""
        return [f'Installing: {self.KIND}']

    @property
    def post_install_messages(self):
        """Messages shown once the install finishes."""
        return [f'Finished: {self.KIND}']

    @property
    def should_run(self):
        """Whether this installer applies; subclasses may override."""
        return True

    def check_prerequisites(self):
        """Verify prerequisites; no-op by default."""

    def install(self):
        """Run the installer; must be provided by subclasses."""
        raise NotImplementedError()

    def subprocess_args(self, shell=False):
        """Keyword arguments for running subprocess commands."""
        return sdk_utils.subprocess_args(self.pod, shell=shell)
| mit | 231a2c9a30b6c0a8a0613dd96db51eca | 24.540984 | 73 | 0.630937 | 4.50289 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.