hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73109e94e405161ed8aebf68ae23f9e76eb1142 | 26,888 | py | Python | lib/pybtex-0.19/pybtex/database/__init__.py | cabeen/bibfmt | a2607506f15249f8e0ee900db103d57afec7dec8 | [
"MIT"
] | 1 | 2021-02-20T19:53:48.000Z | 2021-02-20T19:53:48.000Z | lib/pybtex-0.19/pybtex/database/__init__.py | cabeen/bibfmt | a2607506f15249f8e0ee900db103d57afec7dec8 | [
"MIT"
] | null | null | null | lib/pybtex-0.19/pybtex/database/__init__.py | cabeen/bibfmt | a2607506f15249f8e0ee900db103d57afec7dec8 | [
"MIT"
] | null | null | null | # vim: fileencoding=utf-8
# Copyright (c) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import re
from collections import Mapping
from pybtex.plugin import find_plugin
from pybtex.exceptions import PybtexError
from pybtex.utils import (
deprecated,
OrderedCaseInsensitiveDict, CaseInsensitiveDefaultDict, CaseInsensitiveSet
)
from pybtex.bibtex.utils import split_tex_string, scan_bibtex_string
from pybtex.errors import report_error
class BibliographyDataError(PybtexError):
    """Raised on malformed or inconsistent bibliography data, e.g. repeated
    entry keys or cross-references to entries that do not exist."""
    pass
class InvalidNameString(PybtexError):
    """Raised when a BibTeX name string contains too many commas to parse."""

    def __init__(self, name_string):
        # Build the full message up front and hand it to the base class.
        super(InvalidNameString, self).__init__(
            'Too many commas in {0!r}'.format(name_string)
        )
class BibliographyData(object):
    """A collection of bibliography entries plus an optional LaTeX preamble."""

    def __init__(self, entries=None, preamble=None, wanted_entries=None, min_crossrefs=2):
        """
        A :py:class:`.BibliographyData` object contains a dictionary of bibliography
        entries referenced by their keys.
        Each entry represented by an :py:class:`.Entry` object.
        Additionally, :py:class:`.BibliographyData` may contain a LaTeX
        preamble defined by ``@PREAMBLE`` commands in the BibTeX file.

        :param entries: A mapping or an iterable of ``(key, entry)`` pairs.
        :param preamble: An iterable of LaTeX preamble strings.
        :param wanted_entries: Keys of the entries to keep; ``None`` keeps all.
        :param min_crossrefs: Minimum number of cross-references before a
            crossref target is included as an extra citation.
        """
        self.entries = OrderedCaseInsensitiveDict()
        '''A dictionary of bibliography entries referenced by their keys.
        The dictionary is case insensitive:

        >>> bib_data = parse_string("""
        ... @ARTICLE{gnats,
        ...     author = {L[eslie] A. Aamport},
        ...     title = {The Gnats and Gnus Document Preparation System},
        ... }
        ... """, 'bibtex')
        >>> bib_data.entries['gnats'] == bib_data.entries['GNATS']
        True
        '''
        # How many times each entry has been cross-referenced so far.
        self.crossref_count = CaseInsensitiveDefaultDict(int)
        self.min_crossrefs = min_crossrefs
        self._preamble = []
        if wanted_entries is not None:
            self.wanted_entries = CaseInsensitiveSet(wanted_entries)
            self.citations = CaseInsensitiveSet(wanted_entries)
        else:
            # No explicit citation list: every entry is wanted.
            self.wanted_entries = None
            self.citations = CaseInsensitiveSet()
        if entries:
            if isinstance(entries, Mapping):
                entries = entries.iteritems()
            for (key, entry) in entries:
                self.add_entry(key, entry)
        if preamble:
            self._preamble.extend(preamble)

    def __eq__(self, other):
        if not isinstance(other, BibliographyData):
            return super(BibliographyData, self) == other
        return (
            self.entries == other.entries
            and self._preamble == other._preamble
        )

    def __repr__(self):
        return 'BibliographyData(entries={entries}, preamble={preamble})'.format(
            entries=repr(self.entries),
            preamble=repr(self._preamble),
        )

    def add_to_preamble(self, *values):
        """Append one or more strings to the LaTeX preamble."""
        self._preamble.extend(values)

    @property
    def preamble(self):
        r'''
        LaTeX preamble.

        >>> bib_data = parse_string(r"""
        ... @PREAMBLE{"\newcommand{\noopsort}[1]{}"}
        ... """, 'bibtex')
        >>> print bib_data.preamble
        \newcommand{\noopsort}[1]{}

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.get_preamble()`, which is now deprecated.
        '''
        return ''.join(self._preamble)

    @deprecated('0.19', 'use BibliographyData.preamble instead')
    def get_preamble(self):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.preamble` instead.
        """
        return self.preamble

    def want_entry(self, key):
        # An entry is wanted when there is no explicit wanted list, or its
        # key (or the '*' wildcard) appears in it.
        return (
            self.wanted_entries is None
            or key in self.wanted_entries
            or '*' in self.wanted_entries
        )

    def get_canonical_key(self, key):
        # Normalize the key to the spelling used in the citation list, if any.
        if key in self.citations:
            return self.citations.get_canonical_key(key)
        else:
            return key

    def add_entry(self, key, entry):
        """Add a single entry under *key*.

        Unwanted keys are silently skipped; a duplicate key is reported via
        :py:func:`report_error` (not raised) and the new entry is dropped.
        """
        if not self.want_entry(key):
            return
        if key in self.entries:
            # Fixed typo in the user-facing message ("bibliograhpy").
            report_error(BibliographyDataError('repeated bibliography entry: %s' % key))
            return
        entry.collection = self
        entry.key = self.get_canonical_key(key)
        self.entries[entry.key] = entry
        try:
            crossref = entry.fields['crossref']
        except KeyError:
            pass
        else:
            # A wanted entry makes its crossref target wanted, too.
            if self.wanted_entries is not None:
                self.wanted_entries.add(crossref)

    def add_entries(self, entries):
        """Add ``(key, entry)`` pairs from an iterable."""
        for key, entry in entries:
            self.add_entry(key, entry)

    def _get_crossreferenced_citations(self, citations, min_crossrefs):
        """
        Get cititations not cited explicitly but referenced by other citations.

        >>> from pybtex.database import Entry
        >>> data = BibliographyData({
        ...     'main_article': Entry('article', {'crossref': 'xrefd_arcicle'}),
        ...     'xrefd_arcicle': Entry('article'),
        ... })
        >>> list(data._get_crossreferenced_citations([], min_crossrefs=1))
        []
        >>> list(data._get_crossreferenced_citations(['main_article'], min_crossrefs=1))
        ['xrefd_arcicle']
        >>> list(data._get_crossreferenced_citations(['Main_article'], min_crossrefs=1))
        ['xrefd_arcicle']
        >>> list(data._get_crossreferenced_citations(['main_article'], min_crossrefs=2))
        []
        >>> list(data._get_crossreferenced_citations(['xrefd_arcicle'], min_crossrefs=1))
        []

        >>> data2 = BibliographyData(data.entries, wanted_entries=data.entries.keys())
        >>> list(data2._get_crossreferenced_citations([], min_crossrefs=1))
        []
        >>> list(data2._get_crossreferenced_citations(['main_article'], min_crossrefs=1))
        ['xrefd_arcicle']
        >>> list(data2._get_crossreferenced_citations(['Main_article'], min_crossrefs=1))
        ['xrefd_arcicle']
        >>> list(data2._get_crossreferenced_citations(['main_article'], min_crossrefs=2))
        []
        >>> list(data2._get_crossreferenced_citations(['xrefd_arcicle'], min_crossrefs=1))
        []
        >>> list(data2._get_crossreferenced_citations(['xrefd_arcicle'], min_crossrefs=1))
        []
        """
        crossref_count = CaseInsensitiveDefaultDict(int)
        citation_set = CaseInsensitiveSet(citations)
        for citation in citations:
            try:
                entry = self.entries[citation]
                crossref = entry.fields['crossref']
            except KeyError:
                # Unknown citation key or no crossref field: nothing to follow.
                continue
            try:
                crossref_entry = self.entries[crossref]
            except KeyError:
                report_error(BibliographyDataError(
                    'bad cross-reference: entry "{key}" refers to '
                    'entry "{crossref}" which does not exist.'.format(
                        key=citation, crossref=crossref,
                    )
                ))
                continue
            canonical_crossref = crossref_entry.key
            crossref_count[canonical_crossref] += 1
            if crossref_count[canonical_crossref] >= min_crossrefs and canonical_crossref not in citation_set:
                citation_set.add(canonical_crossref)
                yield canonical_crossref

    def _expand_wildcard_citations(self, citations):
        r"""
        Expand wildcard citations (\citation{*} in .aux file).

        (Raw docstring: '\c' is an invalid escape sequence otherwise.)

        >>> from pybtex.database import Entry
        >>> data = BibliographyData((
        ...     ('uno', Entry('article')),
        ...     ('dos', Entry('article')),
        ...     ('tres', Entry('article')),
        ...     ('cuatro', Entry('article')),
        ... ))
        >>> list(data._expand_wildcard_citations([]))
        []
        >>> list(data._expand_wildcard_citations(['*']))
        ['uno', 'dos', 'tres', 'cuatro']
        >>> list(data._expand_wildcard_citations(['uno', '*']))
        ['uno', 'dos', 'tres', 'cuatro']
        >>> list(data._expand_wildcard_citations(['dos', '*']))
        ['dos', 'uno', 'tres', 'cuatro']
        >>> list(data._expand_wildcard_citations(['*', 'uno']))
        ['uno', 'dos', 'tres', 'cuatro']
        >>> list(data._expand_wildcard_citations(['*', 'DOS']))
        ['uno', 'dos', 'tres', 'cuatro']
        """
        citation_set = CaseInsensitiveSet()
        for citation in citations:
            if citation == '*':
                # Expand the wildcard to every entry not yet yielded.
                for key in self.entries:
                    if key not in citation_set:
                        citation_set.add(key)
                        yield key
            else:
                if citation not in citation_set:
                    citation_set.add(citation)
                    yield citation

    def add_extra_citations(self, citations, min_crossrefs):
        """Return citations with wildcards expanded and crossref targets appended."""
        expanded_citations = list(self._expand_wildcard_citations(citations))
        crossrefs = list(self._get_crossreferenced_citations(expanded_citations, min_crossrefs))
        return expanded_citations + crossrefs

    def to_string(self, bib_format, **kwargs):
        """
        Return the data as a unicode string in the given format.

        :param bib_format: Data format ("bibtex", "yaml", etc.).

        .. versionadded:: 0.19
        """
        writer = find_plugin('pybtex.database.output', bib_format)(**kwargs)
        return writer.to_string(self)

    def to_bytes(self, bib_format, **kwargs):
        """
        Return the data as a byte string in the given format.

        :param bib_format: Data format ("bibtex", "yaml", etc.).

        .. versionadded:: 0.19
        """
        writer = find_plugin('pybtex.database.output', bib_format)(**kwargs)
        return writer.to_bytes(self)

    def to_file(self, file, bib_format=None, **kwargs):
        """
        Save the data to a file.

        :param file: A file name or a file-like object.
        :param bib_format: Data format ("bibtex", "yaml", etc.).
            If not specified, Pybtex will try to guess by the file name.

        .. versionadded:: 0.19
        """
        if isinstance(file, basestring):
            filename = file
        else:
            filename = getattr(file, 'name', None)
        writer = find_plugin('pybtex.database.output', bib_format, filename=filename)(**kwargs)
        return writer.write_file(self, file)

    def lower(self):
        u'''
        Return another :py:class:`.BibliographyData` with all identifiers converted to lowercase.

        >>> data = parse_string("""
        ... @BOOK{Obrazy,
        ...     title = "Obrazy z Rus",
        ...     author = "Karel Havlíček Borovský",
        ... }
        ... @BOOK{Elegie,
        ...     title = "Tirolské elegie",
        ...     author = "Karel Havlíček Borovský",
        ... }
        ... """, 'bibtex')
        >>> data_lower = data.lower()
        >>> data_lower.entries.keys()
        ['obrazy', 'elegie']
        >>> for entry in data_lower.entries.values():
        ...     entry.key
        ...     entry.persons.keys()
        ...     entry.fields.keys()
        'obrazy'
        ['author']
        ['title']
        'elegie'
        ['author']
        ['title']
        '''
        entries_lower = ((key.lower(), entry.lower()) for key, entry in self.entries.iteritems())
        return type(self)(
            entries=entries_lower,
            preamble=self._preamble,
            wanted_entries=self.wanted_entries,
            min_crossrefs=self.min_crossrefs,
        )
class FieldDict(OrderedCaseInsensitiveDict):
    """An ordered, case-insensitive dictionary of entry fields.

    Lookups that miss fall back to the parent entry: first to its person
    lists (joined with ``' and '``), then to the fields of the entry named
    by its ``crossref`` field.
    """

    def __init__(self, parent, *args, **kwargs):
        """:param parent: the entry this field dict belongs to."""
        self.parent = parent
        super(FieldDict, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        try:
            return super(FieldDict, self).__getitem__(key)
        except KeyError:
            if key in self.parent.persons:
                persons = self.parent.persons[key]
                return ' and '.join(unicode(person) for person in persons)
            elif 'crossref' in self:
                return self.parent.get_crossref().fields[key]
            else:
                raise KeyError(key)

    def lower(self):
        """Return a copy of this dict with all keys converted to lowercase."""
        # Fixed: the previous implementation also called
        # super(FieldDict, self).lower() and discarded the result --
        # a useless extra copy of the whole dict.
        return type(self)(self.parent, self.iteritems_lower())
class Entry(object):
    """A bibliography entry."""

    # Entry key (for example, 'fukushima1980neocognitron').
    key = None

    def __init__(self, type_, fields=None, persons=None, collection=None):
        # Entry type ('book', 'article', etc.), normalized to lowercase.
        self.type = type_.lower()
        # Entry type exactly as spelled in the source data.
        self.original_type = type_
        # Ordered, case-insensitive dictionary of entry fields.
        self.fields = FieldDict(self, fields if fields is not None else {})
        # Ordered, case-insensitive dictionary of persons by role;
        # the most common roles are 'author' and 'editor'.
        self.persons = OrderedCaseInsensitiveDict(
            persons if persons is not None else {}
        )
        self.collection = collection
        # Variable storage used by the BibTeX interpreter.
        self.vars = {}

    def __eq__(self, other):
        if not isinstance(other, Entry):
            return super(Entry, self) == other
        return (self.type == other.type
                and self.fields == other.fields
                and self.persons == other.persons)

    def __repr__(self):
        # FieldDict keeps a reference back to its parent entry, which makes
        # its own repr problematic, so show the fields as a list of pairs.
        return 'Entry({type_}, fields={fields}, persons={persons})'.format(
            type_=repr(self.type),
            fields=repr(self.fields.items()),
            persons=repr(self.persons),
        )

    def get_crossref(self):
        """Return the entry referenced by this entry's ``crossref`` field."""
        return self.collection.entries[self.fields['crossref']]

    def add_person(self, person, role):
        """Append *person* to the list of persons with the given *role*."""
        self.persons.setdefault(role, []).append(person)

    def lower(self):
        """Return a copy of this entry with all identifiers lowercased."""
        return type(self)(
            self.type,
            fields=self.fields.lower(),
            persons=self.persons.lower(),
            collection=self.collection,
        )
class Person(object):
    """A person or some other person-like entity.

    >>> knuth = Person('Donald E. Knuth')
    >>> knuth.first_names
    ['Donald']
    >>> knuth.middle_names
    ['E.']
    >>> knuth.last_names
    ['Knuth']
    """
    valid_roles = ['author', 'editor']
    # Raw strings: '\s' inside a plain string literal is an invalid escape
    # sequence (deprecated, and a future SyntaxError in Python 3).
    style1_re = re.compile(r'^(.+),\s*(.+)$')
    style2_re = re.compile(r'^(.+),\s*(.+),\s*(.+)$')

    def __init__(self, string="", first="", middle="", prelast="", last="", lineage=""):
        """
        :param string: The full name string.
            It will be parsed and split into separate first, last, middle,
            pre-last and lineage name parts.

            Supported name formats are:

            - von Last, First
            - von Last, Jr, First
            - First von Last

            (see BibTeX manual for explanation)
        """
        self.first_names = []
        """
        A list of first names.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.first`, which is now deprecated.
        """
        self.middle_names = []
        """
        A list of middle names.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.middle`, which is now deprecated.
        """
        self.prelast_names = []
        """
        A list of pre-last (aka von) name parts.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.prelast`, which is now deprecated.
        """
        self.last_names = []
        """
        A list of last names.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.last`, which is now deprecated.
        """
        self.lineage_names = []
        """
        A list of lineage (aka Jr) name parts.

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`.lineage`, which is now deprecated.
        """
        string = string.strip()
        if string:
            self._parse_string(string)
        # Explicitly passed name parts are appended after the parsed ones.
        self.first_names.extend(split_tex_string(first))
        self.middle_names.extend(split_tex_string(middle))
        self.prelast_names.extend(split_tex_string(prelast))
        self.last_names.extend(split_tex_string(last))
        self.lineage_names.extend(split_tex_string(lineage))

    @property
    def bibtex_first_names(self):
        """A list of first and middle names together.
        (BibTeX treats all middle names as first.)

        .. versionadded:: 0.19
            Earlier versions used :py:meth:`Person.bibtex_first`, which is now deprecated.

        >>> knuth = Person('Donald E. Knuth')
        >>> knuth.bibtex_first_names
        ['Donald', 'E.']
        """
        return self.first_names + self.middle_names

    def _parse_string(self, name):
        """Extract various parts of the name from a string.

        >>> p = Person('Avinash K. Dixit')
        >>> print p.first_names
        ['Avinash']
        >>> print p.middle_names
        ['K.']
        >>> print p.prelast_names
        []
        >>> print p.last_names
        ['Dixit']
        >>> print p.lineage_names
        []
        >>> print unicode(p)
        Dixit, Avinash K.
        >>> p == Person(unicode(p))
        True
        >>> p = Person('Dixit, Jr, Avinash K. ')
        >>> print p.first_names
        ['Avinash']
        >>> print p.middle_names
        ['K.']
        >>> print p.prelast_names
        []
        >>> print p.last_names
        ['Dixit']
        >>> print p.lineage_names
        ['Jr']
        >>> print unicode(p)
        Dixit, Jr, Avinash K.
        >>> p == Person(unicode(p))
        True

        >>> p = Person('abc')
        >>> print p.first_names, p.middle_names, p.prelast_names, p.last_names, p.lineage_names
        [] [] [] ['abc'] []
        >>> p = Person('Viktorov, Michail~Markovitch')
        >>> print p.first_names, p.middle_names, p.prelast_names, p.last_names, p.lineage_names
        ['Michail'] ['Markovitch'] [] ['Viktorov'] []
        """
        def process_first_middle(parts):
            # First token is the first name, the rest are middle names.
            try:
                self.first_names.append(parts[0])
                self.middle_names.extend(parts[1:])
            except IndexError:
                pass

        def process_von_last(parts):
            # von cannot be the last name in the list
            von_last = parts[:-1]
            definitely_not_von = parts[-1:]
            if von_last:
                von, last = rsplit_at(von_last, is_von_name)
                self.prelast_names.extend(von)
                self.last_names.extend(last)
            self.last_names.extend(definitely_not_von)

        def find_pos(lst, pred):
            # Index of the first item matching pred, or len(lst) if none.
            # NOTE(review): assumes lst is non-empty (otherwise `i` would be
            # unbound); all current callers guarantee that.
            for i, item in enumerate(lst):
                if pred(item):
                    return i
            return i + 1

        def split_at(lst, pred):
            """Split the given list into two parts.

            The second part starts with the first item for which the given
            predicate is True.
            """
            pos = find_pos(lst, pred)
            return lst[:pos], lst[pos:]

        def rsplit_at(lst, pred):
            # Like split_at, but scanning from the right: the first part
            # ends with the last item matching the predicate.
            rpos = find_pos(reversed(lst), pred)
            pos = len(lst) - rpos
            return lst[:pos], lst[pos:]

        def is_von_name(string):
            # A part is a "von" part when its first letter at brace level 0
            # (or in a brace-level-1 special character) is lowercase.
            if string[0].isupper():
                return False
            if string[0].islower():
                return True
            else:
                for char, brace_level in scan_bibtex_string(string):
                    if brace_level == 0 and char.isalpha():
                        return char.islower()
                    elif brace_level == 1 and char.startswith('\\'):
                        return special_char_islower(char)
            return False

        def special_char_islower(special_char):
            control_sequence = True
            for char in special_char[1:]:  # skip the backslash
                if control_sequence:
                    if not char.isalpha():
                        control_sequence = False
                else:
                    if char.isalpha():
                        return char.islower()
            return False

        parts = split_tex_string(name, ',')
        if len(parts) > 3:
            # Too many commas: report the error, then fold everything after
            # the second comma into a single part.
            report_error(InvalidNameString(name))
            last_parts = parts[2:]
            parts = parts[:2] + [' '.join(last_parts)]
        if len(parts) == 3:  # von Last, Jr, First
            process_von_last(split_tex_string(parts[0]))
            self.lineage_names.extend(split_tex_string(parts[1]))
            process_first_middle(split_tex_string(parts[2]))
        elif len(parts) == 2:  # von Last, First
            process_von_last(split_tex_string(parts[0]))
            process_first_middle(split_tex_string(parts[1]))
        elif len(parts) == 1:  # First von Last
            parts = split_tex_string(name)
            first_middle, von_last = split_at(parts, is_von_name)
            if not von_last and first_middle:
                # No von part at all: the last word is the last name.
                last = first_middle.pop()
                von_last.append(last)
            process_first_middle(first_middle)
            process_von_last(von_last)
        else:
            # should not really happen
            raise ValueError(name)

    def __eq__(self, other):
        if not isinstance(other, Person):
            return super(Person, self) == other
        return (
            self.first_names == other.first_names
            and self.middle_names == other.middle_names
            and self.prelast_names == other.prelast_names
            and self.last_names == other.last_names
            and self.lineage_names == other.lineage_names
        )

    def __unicode__(self):
        # von Last, Jr, First
        von_last = ' '.join(self.prelast_names + self.last_names)
        jr = ' '.join(self.lineage_names)
        first = ' '.join(self.first_names + self.middle_names)
        return ', '.join(part for part in (von_last, jr, first) if part)

    def __repr__(self):
        return 'Person({0})'.format(repr(unicode(self)))

    def get_part_as_text(self, type):
        """Return the requested name parts joined into one space-separated string."""
        names = getattr(self, type + '_names')
        return ' '.join(names)

    def get_part(self, type, abbr=False):
        """Get a list of name parts by `type`.

        >>> knuth = Person('Donald E. Knuth')
        >>> knuth.get_part('first')
        ['Donald']
        >>> knuth.get_part('last')
        ['Knuth']
        """
        names = getattr(self, type + '_names')
        if abbr:
            import warnings
            warnings.warn('Person.get_part(abbr=True) is deprecated since 0.19: use pybtex.textutils.abbreviate()', stacklevel=2)
            from pybtex.textutils import abbreviate
            names = [abbreviate(name) for name in names]
        return names

    @deprecated('0.19', 'use Person.first_names instead')
    def first(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.first_names` instead.
        """
        return self.get_part('first', abbr)

    @deprecated('0.19', 'use Person.middle_names instead')
    def middle(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.middle_names` instead.
        """
        return self.get_part('middle', abbr)

    @deprecated('0.19', 'use Person.prelast_names instead')
    def prelast(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.prelast_names` instead.
        """
        return self.get_part('prelast', abbr)

    @deprecated('0.19', 'use Person.last_names instead')
    def last(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.last_names` instead.
        """
        return self.get_part('last', abbr)

    @deprecated('0.19', 'use Person.lineage_names instead')
    def lineage(self, abbr=False):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.lineage_names` instead.
        """
        return self.get_part('lineage', abbr)

    @deprecated('0.19', 'use Person.bibtex_first_names instead')
    def bibtex_first(self):
        """
        .. deprecated:: 0.19
            Use :py:attr:`.bibtex_first_names` instead.
        """
        return self.bibtex_first_names
def parse_file(file, bib_format=None, **kwargs):
    """
    Read bibliography data from file and return a :py:class:`.BibliographyData` object.

    :param file: A file name or a file-like object.
    :param bib_format: Data format ("bibtex", "yaml", etc.).
        If not specified, Pybtex will try to guess by the file name.

    .. versionadded:: 0.19
    """
    if isinstance(file, basestring):
        filename = file
    else:
        # Bug fix: this used to call the misspelled ``geattr``, raising a
        # NameError whenever a file-like object was passed instead of a name.
        filename = getattr(file, 'name', None)
    parser = find_plugin('pybtex.database.input', bib_format, filename=filename)(**kwargs)
    return parser.parse_file(file)
def parse_string(value, bib_format, **kwargs):
    """
    Parse a Unicode string containing bibliography data and return a :py:class:`.BibliographyData` object.

    :param value: Unicode string.
    :param bib_format: Data format ("bibtex", "yaml", etc.).

    .. versionadded:: 0.19
    """
    parser_class = find_plugin('pybtex.database.input', bib_format)
    parser = parser_class(**kwargs)
    return parser.parse_string(value)
def parse_bytes(value, bib_format, **kwargs):
    """
    Parse a byte string containing bibliography data and return a :py:class:`.BibliographyData` object.

    :param value: Byte string.
    :param bib_format: Data format (for example, "bibtexml").

    .. versionadded:: 0.19
    """
    parser_class = find_plugin('pybtex.database.input', bib_format)
    return parser_class(**kwargs).parse_bytes(value)
| 33.652065 | 129 | 0.576056 |
import re
from collections import Mapping
from pybtex.plugin import find_plugin
from pybtex.exceptions import PybtexError
from pybtex.utils import (
deprecated,
OrderedCaseInsensitiveDict, CaseInsensitiveDefaultDict, CaseInsensitiveSet
)
from pybtex.bibtex.utils import split_tex_string, scan_bibtex_string
from pybtex.errors import report_error
class BibliographyDataError(PybtexError):
    """Raised on malformed bibliography data, e.g. duplicate entry keys
    or cross-references to missing entries."""
    pass
class InvalidNameString(PybtexError):
    """Raised when a BibTeX name string contains too many commas to parse."""

    def __init__(self, name_string):
        # Format the message first, then delegate to the base class.
        super(InvalidNameString, self).__init__(
            'Too many commas in {0!r}'.format(name_string)
        )
class BibliographyData(object):
def __init__(self, entries=None, preamble=None, wanted_entries=None, min_crossrefs=2):
self.entries = OrderedCaseInsensitiveDict()
self.crossref_count = CaseInsensitiveDefaultDict(int)
self.min_crossrefs = min_crossrefs
self._preamble = []
if wanted_entries is not None:
self.wanted_entries = CaseInsensitiveSet(wanted_entries)
self.citations = CaseInsensitiveSet(wanted_entries)
else:
self.wanted_entries = None
self.citations = CaseInsensitiveSet()
if entries:
if isinstance(entries, Mapping):
entries = entries.iteritems()
for (key, entry) in entries:
self.add_entry(key, entry)
if preamble:
self._preamble.extend(preamble)
def __eq__(self, other):
if not isinstance(other, BibliographyData):
return super(BibliographyData, self) == other
return (
self.entries == other.entries
and self._preamble == other._preamble
)
def __repr__(self):
return 'BibliographyData(entries={entries}, preamble={preamble})'.format(
entries=repr(self.entries),
preamble=repr(self._preamble),
)
def add_to_preamble(self, *values):
self._preamble.extend(values)
@property
def preamble(self):
return ''.join(self._preamble)
@deprecated('0.19', 'use BibliographyData.preamble instead')
def get_preamble(self):
return self.preamble
def want_entry(self, key):
return (
self.wanted_entries is None
or key in self.wanted_entries
or '*' in self.wanted_entries
)
def get_canonical_key(self, key):
if key in self.citations:
return self.citations.get_canonical_key(key)
else:
return key
def add_entry(self, key, entry):
if not self.want_entry(key):
return
if key in self.entries:
report_error(BibliographyDataError('repeated bibliograhpy entry: %s' % key))
return
entry.collection = self
entry.key = self.get_canonical_key(key)
self.entries[entry.key] = entry
try:
crossref = entry.fields['crossref']
except KeyError:
pass
else:
if self.wanted_entries is not None:
self.wanted_entries.add(crossref)
def add_entries(self, entries):
for key, entry in entries:
self.add_entry(key, entry)
def _get_crossreferenced_citations(self, citations, min_crossrefs):
crossref_count = CaseInsensitiveDefaultDict(int)
citation_set = CaseInsensitiveSet(citations)
for citation in citations:
try:
entry = self.entries[citation]
crossref = entry.fields['crossref']
except KeyError:
continue
try:
crossref_entry = self.entries[crossref]
except KeyError:
report_error(BibliographyDataError(
'bad cross-reference: entry "{key}" refers to '
'entry "{crossref}" which does not exist.'.format(
key=citation, crossref=crossref,
)
))
continue
canonical_crossref = crossref_entry.key
crossref_count[canonical_crossref] += 1
if crossref_count[canonical_crossref] >= min_crossrefs and canonical_crossref not in citation_set:
citation_set.add(canonical_crossref)
yield canonical_crossref
def _expand_wildcard_citations(self, citations):
citation_set = CaseInsensitiveSet()
for citation in citations:
if citation == '*':
for key in self.entries:
if key not in citation_set:
citation_set.add(key)
yield key
else:
if citation not in citation_set:
citation_set.add(citation)
yield citation
def add_extra_citations(self, citations, min_crossrefs):
expanded_citations = list(self._expand_wildcard_citations(citations))
crossrefs = list(self._get_crossreferenced_citations(expanded_citations, min_crossrefs))
return expanded_citations + crossrefs
def to_string(self, bib_format, **kwargs):
writer = find_plugin('pybtex.database.output', bib_format)(**kwargs)
return writer.to_string(self)
def to_bytes(self, bib_format, **kwargs):
writer = find_plugin('pybtex.database.output', bib_format)(**kwargs)
return writer.to_bytes(self)
def to_file(self, file, bib_format=None, **kwargs):
if isinstance(file, basestring):
filename = file
else:
filename = getattr(file, 'name', None)
writer = find_plugin('pybtex.database.output', bib_format, filename=filename)(**kwargs)
return writer.write_file(self, file)
def lower(self):
entries_lower = ((key.lower(), entry.lower()) for key, entry in self.entries.iteritems())
return type(self)(
entries=entries_lower,
preamble=self._preamble,
wanted_entries=self.wanted_entries,
min_crossrefs=self.min_crossrefs,
)
class FieldDict(OrderedCaseInsensitiveDict):
    """An ordered, case-insensitive dictionary of entry fields.

    Lookups that miss fall back to the parent entry: first to its person
    lists (joined with ``' and '``), then to the fields of the entry named
    by its ``crossref`` field.
    """

    def __init__(self, parent, *args, **kwargs):
        """:param parent: the entry this field dict belongs to."""
        self.parent = parent
        super(FieldDict, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        try:
            return super(FieldDict, self).__getitem__(key)
        except KeyError:
            if key in self.parent.persons:
                persons = self.parent.persons[key]
                return ' and '.join(unicode(person) for person in persons)
            elif 'crossref' in self:
                return self.parent.get_crossref().fields[key]
            else:
                raise KeyError(key)

    def lower(self):
        """Return a copy of this dict with all keys converted to lowercase."""
        # Fixed: the previous implementation also called
        # super(FieldDict, self).lower() and discarded the result --
        # a useless extra copy of the whole dict.
        return type(self)(self.parent, self.iteritems_lower())
class Entry(object):
    """A bibliography entry."""

    # Entry key (for example, 'fukushima1980neocognitron'); set when the
    # entry is added to a collection.
    key = None

    def __init__(self, type_, fields=None, persons=None, collection=None):
        if fields is None:
            fields = {}
        if persons is None:
            persons = {}
        # Entry type ('book', 'article', etc.), normalized to lowercase.
        self.type = type_.lower()
        # Entry type exactly as spelled in the source data.
        self.original_type = type_
        # Ordered, case-insensitive dictionary of entry fields.
        self.fields = FieldDict(self, fields)
        # Ordered, case-insensitive dictionary of persons by role
        # (most commonly 'author' and 'editor').
        self.persons = OrderedCaseInsensitiveDict(persons)
        self.collection = collection
        # Variable storage used by the BibTeX interpreter.
        self.vars = {}

    def __eq__(self, other):
        if not isinstance(other, Entry):
            return super(Entry, self) == other
        return (
            self.type == other.type
            and self.fields == other.fields
            and self.persons == other.persons
        )

    def __repr__(self):
        # FieldDict keeps a reference back to its parent entry, which makes
        # representing it directly problematic, so show the fields as a
        # plain list of (key, value) pairs instead.
        repr_fields = repr(self.fields.items())
        return 'Entry({type_}, fields={fields}, persons={persons})'.format(
            type_=repr(self.type),
            fields=repr_fields,
            persons=repr(self.persons),
        )

    def get_crossref(self):
        """Return the entry referenced by this entry's 'crossref' field."""
        return self.collection.entries[self.fields['crossref']]

    def add_person(self, person, role):
        """Append *person* to the list of persons with the given *role*."""
        self.persons.setdefault(role, []).append(person)

    def lower(self):
        """Return a copy of this entry with all identifiers lowercased."""
        return type(self)(
            self.type,
            fields=self.fields.lower(),
            persons=self.persons.lower(),
            collection=self.collection,
        )
class Person(object):
valid_roles = ['author', 'editor']
style1_re = re.compile('^(.+),\s*(.+)$')
style2_re = re.compile('^(.+),\s*(.+),\s*(.+)$')
def __init__(self, string="", first="", middle="", prelast="", last="", lineage=""):
self.first_names = []
self.middle_names = []
self.prelast_names = []
self.last_names = []
self.lineage_names = []
string = string.strip()
if string:
self._parse_string(string)
self.first_names.extend(split_tex_string(first))
self.middle_names.extend(split_tex_string(middle))
self.prelast_names.extend(split_tex_string(prelast))
self.last_names.extend(split_tex_string(last))
self.lineage_names.extend(split_tex_string(lineage))
@property
def bibtex_first_names(self):
return self.first_names + self.middle_names
def _parse_string(self, name):
def process_first_middle(parts):
try:
self.first_names.append(parts[0])
self.middle_names.extend(parts[1:])
except IndexError:
pass
def process_von_last(parts):
von_last = parts[:-1]
definitely_not_von = parts[-1:]
if von_last:
von, last = rsplit_at(von_last, is_von_name)
self.prelast_names.extend(von)
self.last_names.extend(last)
self.last_names.extend(definitely_not_von)
def find_pos(lst, pred):
for i, item in enumerate(lst):
if pred(item):
return i
return i + 1
def split_at(lst, pred):
pos = find_pos(lst, pred)
return lst[:pos], lst[pos:]
def rsplit_at(lst, pred):
rpos = find_pos(reversed(lst), pred)
pos = len(lst) - rpos
return lst[:pos], lst[pos:]
def is_von_name(string):
if string[0].isupper():
return False
if string[0].islower():
return True
else:
for char, brace_level in scan_bibtex_string(string):
if brace_level == 0 and char.isalpha():
return char.islower()
elif brace_level == 1 and char.startswith('\\'):
return special_char_islower(char)
return False
def special_char_islower(special_char):
control_sequence = True
for char in special_char[1:]:
if control_sequence:
if not char.isalpha():
control_sequence = False
else:
if char.isalpha():
return char.islower()
return False
parts = split_tex_string(name, ',')
if len(parts) > 3:
report_error(InvalidNameString(name))
last_parts = parts[2:]
parts = parts[:2] + [' '.join(last_parts)]
if len(parts) == 3:
process_von_last(split_tex_string(parts[0]))
self.lineage_names.extend(split_tex_string(parts[1]))
process_first_middle(split_tex_string(parts[2]))
elif len(parts) == 2:
process_von_last(split_tex_string(parts[0]))
process_first_middle(split_tex_string(parts[1]))
elif len(parts) == 1:
parts = split_tex_string(name)
first_middle, von_last = split_at(parts, is_von_name)
if not von_last and first_middle:
last = first_middle.pop()
von_last.append(last)
process_first_middle(first_middle)
process_von_last(von_last)
else:
raise ValueError(name)
def __eq__(self, other):
if not isinstance(other, Person):
return super(Person, self) == other
return (
self.first_names == other.first_names
and self.middle_names == other.middle_names
and self.prelast_names == other.prelast_names
and self.last_names == other.last_names
and self.lineage_names == other.lineage_names
)
def __unicode__(self):
von_last = ' '.join(self.prelast_names + self.last_names)
jr = ' '.join(self.lineage_names)
first = ' '.join(self.first_names + self.middle_names)
return ', '.join(part for part in (von_last, jr, first) if part)
    def __repr__(self):
        # NOTE: relies on the Python 2 ``unicode`` builtin, consistent with
        # the __unicode__ method above.
        return 'Person({0})'.format(repr(unicode(self)))
def get_part_as_text(self, type):
names = getattr(self, type + '_names')
return ' '.join(names)
    def get_part(self, type, abbr=False):
        """Return the list of names for the given part.

        :param type: one of 'first', 'middle', 'prelast', 'last', 'lineage'.
        :param abbr: deprecated since 0.19 -- abbreviate each name via
            pybtex.textutils.abbreviate().
        """
        names = getattr(self, type + '_names')
        if abbr:
            # Deferred imports keep the deprecated path off the hot path.
            import warnings
            warnings.warn('Person.get_part(abbr=True) is deprecated since 0.19: use pybtex.textutils.abbreviate()', stacklevel=2)
            from pybtex.textutils import abbreviate
            names = [abbreviate(name) for name in names]
        return names
    # Deprecated accessors kept for backwards compatibility; each simply
    # forwards to get_part() / the corresponding *_names attribute.
    @deprecated('0.19', 'use Person.first_names instead')
    def first(self, abbr=False):
        return self.get_part('first', abbr)
    @deprecated('0.19', 'use Person.middle_names instead')
    def middle(self, abbr=False):
        return self.get_part('middle', abbr)
    @deprecated('0.19', 'use Person.prelast_names instead')
    def prelast(self, abbr=False):
        return self.get_part('prelast', abbr)
    @deprecated('0.19', 'use Person.last_names instead')
    def last(self, abbr=False):
        return self.get_part('last', abbr)
    @deprecated('0.19', 'use Person.lineage_names instead')
    def lineage(self, abbr=False):
        return self.get_part('lineage', abbr)
    @deprecated('0.19', 'use Person.bibtex_first_names instead')
    def bibtex_first(self):
        return self.bibtex_first_names
def parse_file(file, bib_format=None, **kwargs):
    """Read a bibliography database from a file.

    :param file: a file name or a file-like object.
    :param bib_format: name of the input plugin ('bibtex', 'yaml', ...);
        when None, the format is guessed from the file name.
    :return: the parsed bibliography data object.
    """
    if isinstance(file, basestring):
        filename = file
    else:
        # BUG FIX: this previously called the misspelled ``geattr`` and
        # raised NameError whenever a file-like object was passed in.
        filename = getattr(file, 'name', None)
    parser = find_plugin('pybtex.database.input', bib_format, filename=filename)(**kwargs)
    return parser.parse_file(file)
def parse_string(value, bib_format, **kwargs):
    """Parse a string containing a bibliography database."""
    parser_class = find_plugin('pybtex.database.input', bib_format)
    return parser_class(**kwargs).parse_string(value)
def parse_bytes(value, bib_format, **kwargs):
    """Parse a byte string containing a bibliography database."""
    parser_class = find_plugin('pybtex.database.input', bib_format)
    return parser_class(**kwargs).parse_bytes(value)
| true | true |
f7310a03ff8c470b1a97fb65773a13fadaa84311 | 17,145 | py | Python | cinder/tests/unit/attachments/test_attachments_api.py | helenwalsh/cinder | 307fccea4cc9c6496334b0fe137206cb48499bd5 | [
"Apache-2.0"
] | 1 | 2019-02-17T17:49:41.000Z | 2019-02-17T17:49:41.000Z | cinder/tests/unit/attachments/test_attachments_api.py | BelieveInFuture/cinder | fff95fa6a68a054488ee087b6e31f4f5e28209dc | [
"Apache-2.0"
] | 1 | 2020-12-22T20:40:20.000Z | 2020-12-23T18:34:42.000Z | cinder/tests/unit/attachments/test_attachments_api.py | BelieveInFuture/cinder | fff95fa6a68a054488ee087b6e31f4f5e28209dc | [
"Apache-2.0"
] | 3 | 2020-06-16T07:29:48.000Z | 2020-06-21T10:22:57.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.policies import attachments as attachment_policy
from cinder.policies import base as base_policy
from cinder import policy
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.tests.unit import utils as tests_utils
from cinder.volume import api as volume_api
from cinder.volume import configuration as conf
CONF = cfg.CONF
class AttachmentManagerTestCase(test.TestCase):
"""Attachment related test for volume/api.py."""
    def setUp(self):
        """Set up an admin context, volume API and a plain-user context."""
        super(AttachmentManagerTestCase, self).setUp()
        self.configuration = mock.Mock(conf.Configuration)
        # Admin context used by most tests below.
        self.context = context.get_admin_context()
        self.context.user_id = fake.USER_ID
        self.project_id = fake.PROJECT3_ID
        self.context.project_id = self.project_id
        self.volume_api = volume_api.API()
        # Non-admin context, used to exercise policy enforcement.
        self.user_context = context.RequestContext(
            user_id=fake.USER_ID,
            project_id=fake.PROJECT3_ID)
def test_attachment_create_no_connector(self):
"""Test attachment_create no connector."""
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('null', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_with_connector(self,
mock_rpc_attachment_update):
"""Test attachment_create with connector."""
volume_params = {'status': 'available'}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
connector = {'fake': 'connector'}
attachment = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2,
connector)
mock_rpc_attachment_update.assert_called_once_with(self.context,
mock.ANY,
connector,
mock.ANY)
new_attachment = objects.VolumeAttachment.get_by_id(self.context,
attachment.id)
self.assertEqual(connection_info, new_attachment.connection_info)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
def test_attachment_delete_reserved(self,
mock_rpc_attachment_delete):
"""Test attachment_delete with reserved."""
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
aobj = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual(vref.id, aref.volume_id)
self.volume_api.attachment_delete(self.context,
aobj)
# Since it's just reserved and never finalized, we should never make an
# rpc call
mock_rpc_attachment_delete.assert_not_called()
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_update_and_delete(
self,
mock_rpc_attachment_update,
mock_rpc_attachment_delete):
"""Test attachment_delete."""
volume_params = {'status': 'available'}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
vref = objects.Volume.get_by_id(self.context,
vref.id)
connector = {'fake': 'connector',
'host': 'somehost'}
self.volume_api.attachment_update(self.context,
aref,
connector)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual(connection_info, aref.connection_info)
# We mock the actual call that updates the status
# so force it here
values = {'volume_id': vref.id,
'volume_host': vref.host,
'attach_status': 'attached',
'instance_uuid': fake.UUID2}
aref = db.volume_attach(self.context, values)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual(vref.id, aref.volume_id)
self.volume_api.attachment_delete(self.context,
aref)
mock_rpc_attachment_delete.assert_called_once_with(self.context,
aref.id,
mock.ANY)
def test_additional_attachment_create_no_connector(self):
"""Test attachment_create no connector."""
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('null', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID1)
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual(2, len(vref.volume_attachment))
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_reserve_delete(
self,
mock_rpc_attachment_update):
volume_params = {'status': 'available'}
connector = {
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip": "192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2,
connector=connector)
vref = objects.Volume.get_by_id(self.context,
vref.id)
# Need to set the status here because our mock isn't doing it for us
vref.status = 'in-use'
vref.save()
# Now a second attachment acting as a reserve
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
# We should now be able to delete the original attachment that gave us
# 'in-use' status, and in turn we should revert to the outstanding
# attachments reserve
self.volume_api.attachment_delete(self.context,
aref)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
def test_reserve_reserve_delete(self):
"""Test that we keep reserved status across multiple reserves."""
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.volume_api.attachment_delete(self.context,
aref)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.assertEqual(1, len(vref.volume_attachment))
def test_attachment_create_bootable_multiattach_policy(self):
"""Test attachment_create no connector."""
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
vref.multiattach = True
vref.bootable = True
vref.status = 'in-use'
rules = {
attachment_policy.MULTIATTACH_BOOTABLE_VOLUME_POLICY: base_policy.RULE_ADMIN_API # noqa
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.addCleanup(policy.reset)
self.assertRaises(exception.PolicyNotAuthorized,
self.volume_api.attachment_create,
self.user_context,
vref,
fake.UUID2)
def test_attachment_create_readonly_volume(self):
"""Test attachment_create on a readonly volume."""
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
self.volume_api.update_readonly_flag(self.context, vref, True)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('ro', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
def test_attachment_create_volume_in_error_state(self):
"""Test attachment_create volume in error state."""
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
vref.status = "error"
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID2)
def test_attachment_update_volume_in_error_state(self):
"""Test attachment_update volumem in error state."""
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
vref.status = 'error'
vref.save()
connector = {'fake': 'connector',
'host': 'somehost'}
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_update,
self.context,
aref,
connector)
@mock.patch('cinder.db.sqlalchemy.api.volume_attachment_update',
return_value={})
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update',
return_value={})
@mock.patch.object(db.sqlalchemy.api, '_volume_type_get',
v2_fakes.fake_volume_type_get)
def test_attachment_update_duplicate(self, mock_va_update, mock_db_upd):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context,
deleted=0,
**volume_params)
tests_utils.attach_volume(self.context,
vref.id,
fake.UUID1,
'somehost',
'somemountpoint')
# Update volume with another attachment
tests_utils.attach_volume(self.context,
vref.id,
fake.UUID2,
'somehost2',
'somemountpoint2')
vref.refresh()
# This attachment will collide with the first
connector = {'host': 'somehost'}
vref.volume_attachment[0]['connector'] = {'host': 'somehost'}
vref.volume_attachment[0]['connection_info'] = {'c': 'd'}
with mock.patch('cinder.objects.Volume.get_by_id', return_value=vref):
with mock.patch.object(self.volume_api.volume_rpcapi,
'attachment_update') as m_au:
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_update,
self.context,
vref.volume_attachment[1],
connector)
m_au.assert_not_called()
mock_va_update.assert_not_called()
mock_db_upd.assert_not_called()
def test_attachment_create_creating_volume(self):
"""Test attachment_create on a creating volume."""
volume_params = {'status': 'creating'}
vref = tests_utils.create_volume(self.context, **volume_params)
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID1)
| 45.72 | 100 | 0.552289 |
from unittest import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.policies import attachments as attachment_policy
from cinder.policies import base as base_policy
from cinder import policy
from cinder.tests.unit.api.v2 import fakes as v2_fakes
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import test
from cinder.tests.unit import utils as tests_utils
from cinder.volume import api as volume_api
from cinder.volume import configuration as conf
CONF = cfg.CONF
class AttachmentManagerTestCase(test.TestCase):
def setUp(self):
super(AttachmentManagerTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.context = context.get_admin_context()
self.context.user_id = fake.USER_ID
self.project_id = fake.PROJECT3_ID
self.context.project_id = self.project_id
self.volume_api = volume_api.API()
self.user_context = context.RequestContext(
user_id=fake.USER_ID,
project_id=fake.PROJECT3_ID)
def test_attachment_create_no_connector(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('null', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_with_connector(self,
mock_rpc_attachment_update):
volume_params = {'status': 'available'}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
connector = {'fake': 'connector'}
attachment = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2,
connector)
mock_rpc_attachment_update.assert_called_once_with(self.context,
mock.ANY,
connector,
mock.ANY)
new_attachment = objects.VolumeAttachment.get_by_id(self.context,
attachment.id)
self.assertEqual(connection_info, new_attachment.connection_info)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
def test_attachment_delete_reserved(self,
mock_rpc_attachment_delete):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
aobj = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual(vref.id, aref.volume_id)
self.volume_api.attachment_delete(self.context,
aobj)
# rpc call
mock_rpc_attachment_delete.assert_not_called()
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_update_and_delete(
self,
mock_rpc_attachment_update,
mock_rpc_attachment_delete):
volume_params = {'status': 'available'}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
vref = objects.Volume.get_by_id(self.context,
vref.id)
connector = {'fake': 'connector',
'host': 'somehost'}
self.volume_api.attachment_update(self.context,
aref,
connector)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual(connection_info, aref.connection_info)
# We mock the actual call that updates the status
# so force it here
values = {'volume_id': vref.id,
'volume_host': vref.host,
'attach_status': 'attached',
'instance_uuid': fake.UUID2}
aref = db.volume_attach(self.context, values)
aref = objects.VolumeAttachment.get_by_id(self.context,
aref.id)
self.assertEqual(vref.id, aref.volume_id)
self.volume_api.attachment_delete(self.context,
aref)
mock_rpc_attachment_delete.assert_called_once_with(self.context,
aref.id,
mock.ANY)
def test_additional_attachment_create_no_connector(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('null', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID1)
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual(2, len(vref.volume_attachment))
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
def test_attachment_create_reserve_delete(
self,
mock_rpc_attachment_update):
volume_params = {'status': 'available'}
connector = {
"initiator": "iqn.1993-08.org.debian:01:cad181614cec",
"ip": "192.168.1.20",
"platform": "x86_64",
"host": "tempest-1",
"os_type": "linux2",
"multipath": False}
connection_info = {'fake_key': 'fake_value',
'fake_key2': ['fake_value1', 'fake_value2']}
mock_rpc_attachment_update.return_value = connection_info
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2,
connector=connector)
vref = objects.Volume.get_by_id(self.context,
vref.id)
# Need to set the status here because our mock isn't doing it for us
vref.status = 'in-use'
vref.save()
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.volume_api.attachment_delete(self.context,
aref)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
def test_reserve_reserve_delete(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.volume_api.attachment_delete(self.context,
aref)
vref = objects.Volume.get_by_id(self.context,
vref.id)
self.assertEqual('reserved', vref.status)
self.assertEqual(1, len(vref.volume_attachment))
def test_attachment_create_bootable_multiattach_policy(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
vref.multiattach = True
vref.bootable = True
vref.status = 'in-use'
rules = {
attachment_policy.MULTIATTACH_BOOTABLE_VOLUME_POLICY: base_policy.RULE_ADMIN_API
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
self.addCleanup(policy.reset)
self.assertRaises(exception.PolicyNotAuthorized,
self.volume_api.attachment_create,
self.user_context,
vref,
fake.UUID2)
def test_attachment_create_readonly_volume(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
self.volume_api.update_readonly_flag(self.context, vref, True)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual('ro', aref.attach_mode)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
def test_attachment_create_volume_in_error_state(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
vref.status = "error"
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID2)
def test_attachment_update_volume_in_error_state(self):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context, **volume_params)
aref = self.volume_api.attachment_create(self.context,
vref,
fake.UUID2)
self.assertEqual(fake.UUID2, aref.instance_uuid)
self.assertIsNone(aref.attach_time)
self.assertEqual('reserved', aref.attach_status)
self.assertEqual(vref.id, aref.volume_id)
self.assertEqual({}, aref.connection_info)
vref.status = 'error'
vref.save()
connector = {'fake': 'connector',
'host': 'somehost'}
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_update,
self.context,
aref,
connector)
@mock.patch('cinder.db.sqlalchemy.api.volume_attachment_update',
return_value={})
@mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update',
return_value={})
@mock.patch.object(db.sqlalchemy.api, '_volume_type_get',
v2_fakes.fake_volume_type_get)
def test_attachment_update_duplicate(self, mock_va_update, mock_db_upd):
volume_params = {'status': 'available'}
vref = tests_utils.create_volume(self.context,
deleted=0,
**volume_params)
tests_utils.attach_volume(self.context,
vref.id,
fake.UUID1,
'somehost',
'somemountpoint')
tests_utils.attach_volume(self.context,
vref.id,
fake.UUID2,
'somehost2',
'somemountpoint2')
vref.refresh()
connector = {'host': 'somehost'}
vref.volume_attachment[0]['connector'] = {'host': 'somehost'}
vref.volume_attachment[0]['connection_info'] = {'c': 'd'}
with mock.patch('cinder.objects.Volume.get_by_id', return_value=vref):
with mock.patch.object(self.volume_api.volume_rpcapi,
'attachment_update') as m_au:
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_update,
self.context,
vref.volume_attachment[1],
connector)
m_au.assert_not_called()
mock_va_update.assert_not_called()
mock_db_upd.assert_not_called()
def test_attachment_create_creating_volume(self):
volume_params = {'status': 'creating'}
vref = tests_utils.create_volume(self.context, **volume_params)
self.assertRaises(exception.InvalidVolume,
self.volume_api.attachment_create,
self.context,
vref,
fake.UUID1)
| true | true |
f7310a6506b1291656e1d48a8acd6e1397f886e7 | 1,296 | py | Python | docs/scripts/build-go.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | docs/scripts/build-go.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | docs/scripts/build-go.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Build go.html
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import json
import os
import sys
# Third-party modules
from sphinx.util.inventory import InventoryFile
JS = """
function redirect(rmap) {
var href = window.location.href;
var label = href.split('#')[1];
var base = href.substr(0, href.indexOf("go.html"))
window.location = base + rmap[label];
}
"""
def process(path):
    """Generate a go.html redirect page next to a Sphinx inventory file.

    :param path: path to the Sphinx inventory file (objects.inv).
    Writes ``go.html`` into the same directory; opening
    ``go.html#<label>`` redirects the browser to the documentation URI
    registered for ``<label>``.
    """
    with open(path) as f:
        data = InventoryFile.load(f, "", os.path.join) or {}
    # Missing "std:label" no longer raises KeyError -- emit an empty map.
    labels = data.get("std:label", {})
    redirect_map = {entry: einfo[2] for entry, einfo in sorted(labels.items())}
    page = [
        "<html>",
        "<head>",
        "<title>NOC go</title>",
        "</head>",
        "<body>",
        "<script>",
        JS,
        "redirect(",
        # json.dumps both serializes the map and escapes quotes/backslashes,
        # so arbitrary labels cannot break the generated JavaScript.
        json.dumps(redirect_map),
        ");",
        "</script>",
        "</body>",
        "</html>",
    ]
    go_path = os.path.join(os.path.dirname(path), "go.html")
    with open(go_path, "w") as f:
        f.write("".join(page))
if __name__ == "__main__":
    # Usage: build-go.py <path-to-objects.inv>
    process(sys.argv[1])
| 25.411765 | 72 | 0.45216 |
import sys
import os
from sphinx.util.inventory import InventoryFile
JS = """
function redirect(rmap) {
var href = window.location.href;
var label = href.split('#')[1];
var base = href.substr(0, href.indexOf("go.html"))
window.location = base + rmap[label];
}
"""
def process(path):
r = [
"<html>",
"<head>",
"<title>NOC go</title>",
"</head>",
"<body>",
"<script>",
JS,
"redirect({",
]
with open(path) as f:
data = InventoryFile.load(f, "", os.path.join) or {}
rr = []
for entry, einfo in sorted(data["std:label"].items()):
rr += ["'%s': '%s'" % (entry, einfo[2])]
r += [",".join(rr), "});", "</script>", "</body>", "</html>"]
base = os.path.dirname(path)
go_path = os.path.join(base, "go.html")
with open(go_path, "w") as f:
f.write("".join(r))
if __name__ == "__main__":
process(sys.argv[1])
| true | true |
f7310adaaf9f436b99dc44a44f9873149854a8c7 | 312 | py | Python | trabajos/ejercicio8.py | marilynmamani/marilyn-M.Q | 54090978b1f6e2f12b79b5dd39c59e9594226414 | [
"Apache-2.0"
] | null | null | null | trabajos/ejercicio8.py | marilynmamani/marilyn-M.Q | 54090978b1f6e2f12b79b5dd39c59e9594226414 | [
"Apache-2.0"
] | null | null | null | trabajos/ejercicio8.py | marilynmamani/marilyn-M.Q | 54090978b1f6e2f12b79b5dd39c59e9594226414 | [
"Apache-2.0"
] | null | null | null | def votoElecciones():
print("Como saber si puedes votar por tu edad")
mensaje =""
edadP=int(input("ingrese la edad que tiene:"))
if edadP>=18:
mensaje ="Usted esta apto para votar"
else:
mensaje ="Usted no cumple con la edadad minima y no esta apto para votar"
print(mensaje)
votoElecciones() | 31.2 | 77 | 0.701923 | def votoElecciones():
print("Como saber si puedes votar por tu edad")
mensaje =""
edadP=int(input("ingrese la edad que tiene:"))
if edadP>=18:
mensaje ="Usted esta apto para votar"
else:
mensaje ="Usted no cumple con la edadad minima y no esta apto para votar"
print(mensaje)
votoElecciones() | true | true |
f7310ade1292bec44ede20c2ba3965b6ae5472ce | 26,481 | py | Python | openstack_dashboard/test/unit/api/test_nova.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/unit/api/test_nova.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/unit/api/test_nova.py | NunoEdgarGFlowHub/horizon | 73a0bbd43ea78ac5337f7d00977ec5f32452067e | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.conf import settings
from django.test.utils import override_settings
import mock
from novaclient import api_versions
from novaclient import exceptions as nova_exceptions
from novaclient.v2 import flavor_access as nova_flavor_access
from novaclient.v2 import servers
from horizon import exceptions as horizon_exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class ServerWrapperTests(test.TestCase):
    """Tests for the api.nova.Server wrapper object."""

    use_mox = False

    def test_get_base_attribute(self):
        # Attributes of the wrapped novaclient server pass straight through.
        wrapped = api.nova.Server(self.servers.first(), self.request)
        self.assertEqual(self.servers.first().id, wrapped.id)

    @mock.patch.object(api.glance, 'image_get')
    def test_image_name(self, mock_image_get):
        # image_name resolves the server's image id through glance.
        glance_image = self.images.first()
        mock_image_get.return_value = glance_image
        wrapped = api.nova.Server(self.servers.first(), self.request)
        self.assertEqual(glance_image.name, wrapped.image_name)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               glance_image.id)

    @mock.patch.object(api.glance, 'image_get')
    def test_image_name_no_glance_service(self, mock_image_get):
        # When the glance endpoint is missing, image_name degrades to None.
        raw_server = self.servers.first()
        mock_image_get.side_effect = \
            horizon_exceptions.ServiceCatalogException('image')
        wrapped = api.nova.Server(raw_server, self.request)
        self.assertIsNone(wrapped.image_name)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               wrapped.image['id'])
class ComputeApiTests(test.APIMockTestCase):
    """Tests for the nova (compute) bindings in ``openstack_dashboard.api``.

    Each test stubs the novaclient instance via ``self.stub_novaclient()``,
    then verifies both the value returned by the ``api.nova`` wrapper and
    the exact novaclient call(s) the wrapper made.
    """

    def _mock_current_version(self, mock_novaclient, version,
                              min_version=None):
        """Make the stubbed client report ``version`` as the current one.

        ``min_version`` defaults to '2.1' when not supplied.
        """
        ver = mock.Mock()
        ver.min_version = min_version or '2.1'
        ver.version = version
        mock_novaclient.versions.get_current.return_value = ver
        # To handle upgrade_api
        self.novaclient.api_version = api_versions.APIVersion(version)

    def test_server_reboot(self):
        server = self.servers.first()
        HARDNESS = servers.REBOOT_HARD
        novaclient = self.stub_novaclient()
        novaclient.servers.reboot.return_value = None

        ret_val = api.nova.server_reboot(self.request, server.id)

        self.assertIsNone(ret_val)
        novaclient.servers.reboot.assert_called_once_with(
            server.id, HARDNESS)

    def test_server_soft_reboot(self):
        server = self.servers.first()
        HARDNESS = servers.REBOOT_SOFT
        novaclient = self.stub_novaclient()
        novaclient.servers.reboot.return_value = None

        ret_val = api.nova.server_reboot(self.request, server.id, HARDNESS)

        self.assertIsNone(ret_val)
        novaclient.servers.reboot.assert_called_once_with(
            server.id, HARDNESS)

    def test_server_vnc_console(self):
        server = self.servers.first()
        console = self.servers.vnc_console_data
        console_type = console["console"]["type"]
        novaclient = self.stub_novaclient()
        novaclient.servers.get_vnc_console.return_value = console

        ret_val = api.nova.server_vnc_console(self.request,
                                              server.id,
                                              console_type)

        self.assertIsInstance(ret_val, api.nova.VNCConsole)
        novaclient.servers.get_vnc_console.assert_called_once_with(
            server.id, console_type)

    def test_server_spice_console(self):
        server = self.servers.first()
        console = self.servers.spice_console_data
        console_type = console["console"]["type"]
        novaclient = self.stub_novaclient()
        novaclient.servers.get_spice_console.return_value = console

        ret_val = api.nova.server_spice_console(self.request,
                                                server.id,
                                                console_type)

        self.assertIsInstance(ret_val, api.nova.SPICEConsole)
        novaclient.servers.get_spice_console.assert_called_once_with(
            server.id, console_type)

    def test_server_rdp_console(self):
        server = self.servers.first()
        console = self.servers.rdp_console_data
        console_type = console["console"]["type"]
        novaclient = self.stub_novaclient()
        novaclient.servers.get_rdp_console.return_value = console

        ret_val = api.nova.server_rdp_console(self.request,
                                              server.id,
                                              console_type)

        self.assertIsInstance(ret_val, api.nova.RDPConsole)
        novaclient.servers.get_rdp_console.assert_called_once_with(
            server.id, console_type)

    def test_server_mks_console(self):
        server = self.servers.first()
        console = self.servers.mks_console_data
        # MKS console data is keyed by "remote_console", unlike the others.
        console_type = console["remote_console"]["type"]
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.53')
        novaclient.servers.get_mks_console.return_value = console

        ret_val = api.nova.server_mks_console(self.request,
                                              server.id,
                                              console_type)

        self.assertIsInstance(ret_val, api.nova.MKSConsole)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.servers.get_mks_console.assert_called_once_with(
            server.id, console_type)

    def test_server_list(self):
        servers = self.servers.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.40')
        novaclient.servers.list.return_value = servers

        ret_val, has_more = api.nova.server_list(
            self.request,
            search_opts={'all_tenants': True})

        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.servers.list.assert_called_once_with(
            True, {'all_tenants': True})

    def test_server_list_pagination(self):
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
        servers = self.servers.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.45')
        novaclient.servers.list.return_value = servers

        ret_val, has_more = api.nova.server_list(self.request,
                                                 {'marker': None,
                                                  'paginate': True,
                                                  'all_tenants': True})

        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)
        self.assertFalse(has_more)
        novaclient.versions.get_current.assert_called_once_with()
        # The wrapper asks for one extra row to detect a next page.
        novaclient.servers.list.assert_called_once_with(
            True,
            {'all_tenants': True,
             'marker': None,
             'limit': page_size + 1})

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_server_list_pagination_more(self):
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
        servers = self.servers.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.45')
        # Return page_size + 1 rows so has_more must come back True.
        novaclient.servers.list.return_value = servers[:page_size + 1]

        ret_val, has_more = api.nova.server_list(self.request,
                                                 {'marker': None,
                                                  'paginate': True,
                                                  'all_tenants': True})

        for server in ret_val:
            self.assertIsInstance(server, api.nova.Server)
        self.assertEqual(page_size, len(ret_val))
        self.assertTrue(has_more)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.servers.list.assert_called_once_with(
            True,
            {'all_tenants': True,
             'marker': None,
             'limit': page_size + 1})

    def test_usage_get(self):
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.1')
        # Fix: stub ``usage`` (singular) -- the wrapper and the assertion
        # below both use ``novaclient.usage``; the previous ``usages.get``
        # stub was never consumed (Mock silently auto-created usage.get),
        # so the fixture return value went unused.
        novaclient.usage.get.return_value = self.usages.first()

        ret_val = api.nova.usage_get(self.request, self.tenant.id,
                                     'start', 'end')

        self.assertIsInstance(ret_val, api.nova.NovaUsage)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.usage.get.assert_called_once_with(
            self.tenant.id, 'start', 'end')

    def test_usage_get_paginated(self):
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.40')
        # First call returns a page of data, second an empty page to stop.
        novaclient.usage.get.side_effect = [
            self.usages.first(),
            {},
        ]

        ret_val = api.nova.usage_get(self.request, self.tenant.id,
                                     'start', 'end')

        self.assertIsInstance(ret_val, api.nova.NovaUsage)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.usage.get.assert_has_calls([
            mock.call(self.tenant.id, 'start', 'end'),
            mock.call(self.tenant.id, 'start', 'end',
                      marker=u'063cf7f3-ded1-4297-bc4c-31eae876cc93'),
        ])

    def test_usage_list(self):
        usages = self.usages.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.1')
        novaclient.usage.list.return_value = usages

        ret_val = api.nova.usage_list(self.request, 'start', 'end')

        for usage in ret_val:
            self.assertIsInstance(usage, api.nova.NovaUsage)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.usage.list.assert_called_once_with('start', 'end', True)

    def test_usage_list_paginated(self):
        usages = self.usages.list()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.40')
        novaclient.usage.list.side_effect = [
            usages,
            {},
        ]

        ret_val = api.nova.usage_list(self.request, 'start', 'end')

        for usage in ret_val:
            self.assertIsInstance(usage, api.nova.NovaUsage)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.usage.list.assert_has_calls([
            mock.call('start', 'end', True),
            mock.call('start', 'end', True,
                      marker=u'063cf7f3-ded1-4297-bc4c-31eae876cc93'),
        ])

    def test_server_get(self):
        server = self.servers.first()
        novaclient = self.stub_novaclient()
        self._mock_current_version(novaclient, '2.45')
        novaclient.servers.get.return_value = server

        ret_val = api.nova.server_get(self.request, server.id)

        self.assertIsInstance(ret_val, api.nova.Server)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.servers.get.assert_called_once_with(server.id)

    def test_server_metadata_update(self):
        server = self.servers.first()
        metadata = {'foo': 'bar'}
        novaclient = self.stub_novaclient()
        novaclient.servers.set_meta.return_value = None

        ret_val = api.nova.server_metadata_update(self.request,
                                                  server.id,
                                                  metadata)

        self.assertIsNone(ret_val)
        novaclient.servers.set_meta.assert_called_once_with(server.id,
                                                            metadata)

    def test_server_metadata_delete(self):
        server = self.servers.first()
        keys = ['a', 'b']
        novaclient = self.stub_novaclient()
        novaclient.servers.delete_meta.return_value = None

        ret_val = api.nova.server_metadata_delete(self.request,
                                                  server.id,
                                                  keys)

        self.assertIsNone(ret_val)
        novaclient.servers.delete_meta.assert_called_once_with(server.id, keys)

    def _test_absolute_limits(self, values, expected_results):
        """Stub limits from ``values`` and check the wrapper's dict output."""
        limits = mock.Mock()
        limits.absolute = []
        for key, val in values.items():
            limit = mock.Mock()
            limit.name = key
            limit.value = val
            limits.absolute.append(limit)
        novaclient = self.stub_novaclient()
        novaclient.limits.get.return_value = limits

        ret_val = api.nova.tenant_absolute_limits(self.request, reserved=True)

        for key in expected_results.keys():
            self.assertEqual(expected_results[key], ret_val[key])
        novaclient.limits.get.assert_called_once_with(reserved=True,
                                                      tenant_id=None)

    def test_absolute_limits_handle_unlimited(self):
        # -1 from nova means "unlimited" and must map to float('inf').
        values = {"maxTotalCores": -1, "maxTotalInstances": 10}
        expected_results = {"maxTotalCores": float("inf"),
                            "maxTotalInstances": 10}
        self._test_absolute_limits(values, expected_results)

    def test_absolute_limits_negative_used_workaround(self):
        # Negative "used" quantities reported by nova are clamped to 0.
        values = {"maxTotalCores": -1,
                  "maxTotalInstances": 10,
                  "totalInstancesUsed": -1,
                  "totalCoresUsed": -1,
                  "totalRAMUsed": -2048,
                  "totalSecurityGroupsUsed": 1,
                  "totalFloatingIpsUsed": 0,
                  }
        expected_results = {"maxTotalCores": float("inf"),
                            "maxTotalInstances": 10,
                            "totalInstancesUsed": 0,
                            "totalCoresUsed": 0,
                            "totalRAMUsed": 0,
                            "totalSecurityGroupsUsed": 1,
                            "totalFloatingIpsUsed": 0,
                            }
        self._test_absolute_limits(values, expected_results)

    def test_cold_migrate_host_succeed(self):
        hypervisor = self.hypervisors.first()
        novaclient = self.stub_novaclient()
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.migrate.return_value = None

        ret_val = api.nova.migrate_host(self.request, "host", False, True,
                                        True)

        self.assertTrue(ret_val)
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.migrate.assert_called_once_with('test_uuid')

    def test_cold_migrate_host_fails(self):
        hypervisor = self.hypervisors.first()
        novaclient = self.stub_novaclient()
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.migrate.side_effect = \
            nova_exceptions.ClientException(404)

        self.assertRaises(nova_exceptions.ClientException,
                          api.nova.migrate_host,
                          self.request, "host", False, True, True)
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.migrate.assert_called_once_with('test_uuid')

    def test_live_migrate_host_with_active_vm(self):
        hypervisor = self.hypervisors.first()
        server = self.servers.first()
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]
        self._mock_current_version(novaclient, '2.45')
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.get.return_value = server
        novaclient.servers.live_migrate.return_value = None

        ret_val = api.nova.migrate_host(self.request, "host", True, True,
                                        True)

        self.assertTrue(ret_val)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.get.assert_called_once_with(server_uuid)
        novaclient.servers.live_migrate.assert_called_once_with(
            server_uuid, None, True, True)

    def test_live_migrate_host_with_paused_vm(self):
        hypervisor = self.hypervisors.first()
        # Fixture index 3 is a paused server; live migration still applies.
        server = self.servers.list()[3]
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]
        self._mock_current_version(novaclient, '2.45')
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.get.return_value = server
        novaclient.servers.live_migrate.return_value = None

        ret_val = api.nova.migrate_host(self.request, "host", True, True, True)

        self.assertTrue(ret_val)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.get.assert_called_once_with(server_uuid)
        novaclient.servers.live_migrate.assert_called_once_with(
            server_uuid, None, True, True)

    def test_live_migrate_host_without_running_vm(self):
        hypervisor = self.hypervisors.first()
        # Fixture index 1 is not running, so a cold migrate is used instead.
        server = self.servers.list()[1]
        novaclient = self.stub_novaclient()
        server_uuid = hypervisor.servers[0]["uuid"]
        self._mock_current_version(novaclient, '2.45')
        novaclient.hypervisors.search.return_value = [hypervisor]
        novaclient.servers.get.return_value = server
        novaclient.servers.migrate.return_value = None

        ret_val = api.nova.migrate_host(self.request, "host", True, True, True)

        self.assertTrue(ret_val)
        novaclient.versions.get_current.assert_called_once_with()
        novaclient.hypervisors.search.assert_called_once_with('host', True)
        novaclient.servers.get.assert_called_once_with(server_uuid)
        novaclient.servers.migrate.assert_called_once_with(server_uuid)

    # ------------------------- Flavor tests -------------------------
    # (was a stray no-op string statement; a comment is the right tool)

    def test_flavor_list_no_extras(self):
        flavors = self.flavors.list()
        novaclient = self.stub_novaclient()
        novaclient.flavors.list.return_value = flavors

        api_flavors = api.nova.flavor_list(self.request)

        self.assertEqual(len(flavors), len(api_flavors))
        novaclient.flavors.list.assert_called_once_with(is_public=True)

    def test_flavor_get_no_extras(self):
        flavor = self.flavors.list()[1]
        novaclient = self.stub_novaclient()
        novaclient.flavors.get.return_value = flavor

        api_flavor = api.nova.flavor_get(self.request, flavor.id)

        self.assertEqual(api_flavor.id, flavor.id)
        novaclient.flavors.get.assert_called_once_with(flavor.id)

    def _test_flavor_list_paged(self, reversed_order=False, paginate=True):
        """Common driver for the flavor_list_paged ordering/paging tests."""
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
        flavors = self.flavors.list()
        order = 'asc' if reversed_order else 'desc'
        novaclient = self.stub_novaclient()
        novaclient.flavors.list.return_value = flavors

        api_flavors, has_more, has_prev = api.nova.flavor_list_paged(
            self.request, True, False, None, paginate=paginate,
            reversed_order=reversed_order)

        for flavor in api_flavors:
            self.assertIsInstance(flavor, type(flavors[0]))
        self.assertFalse(has_more)
        self.assertFalse(has_prev)
        if paginate:
            novaclient.flavors.list.assert_called_once_with(
                is_public=True, marker=None, limit=page_size + 1,
                sort_key='name', sort_dir=order)
        else:
            novaclient.flavors.list.assert_called_once_with(
                is_public=True)

    @override_settings(API_RESULT_PAGE_SIZE=1)
    def test_flavor_list_pagination_more_and_prev(self):
        page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
        flavors = self.flavors.list()
        marker = flavors[0].id
        novaclient = self.stub_novaclient()
        novaclient.flavors.list.return_value = flavors[1:page_size + 2]

        api_flavors, has_more, has_prev = api.nova.flavor_list_paged(
            self.request,
            True,
            False,
            marker,
            paginate=True)

        for flavor in api_flavors:
            self.assertIsInstance(flavor, type(flavors[0]))
        self.assertEqual(page_size, len(api_flavors))
        self.assertTrue(has_more)
        self.assertTrue(has_prev)
        novaclient.flavors.list.assert_called_once_with(
            is_public=True, marker=marker, limit=page_size + 1,
            sort_key='name', sort_dir='desc')

    def test_flavor_list_paged_default_order(self):
        self._test_flavor_list_paged()

    def test_flavor_list_paged_reversed_order(self):
        self._test_flavor_list_paged(reversed_order=True)

    def test_flavor_list_paged_paginate_false(self):
        self._test_flavor_list_paged(paginate=False)

    def test_flavor_create(self):
        flavor = self.flavors.first()
        novaclient = self.stub_novaclient()
        novaclient.flavors.create.return_value = flavor

        api_flavor = api.nova.flavor_create(self.request,
                                            flavor.name,
                                            flavor.ram,
                                            flavor.vcpus,
                                            flavor.disk)

        self.assertIsInstance(api_flavor, type(flavor))
        self.assertEqual(flavor.name, api_flavor.name)
        self.assertEqual(flavor.ram, api_flavor.ram)
        self.assertEqual(flavor.vcpus, api_flavor.vcpus)
        self.assertEqual(flavor.disk, api_flavor.disk)
        # Defaults applied by the wrapper when not specified by the caller.
        self.assertEqual(0, api_flavor.ephemeral)
        self.assertEqual(0, api_flavor.swap)
        self.assertTrue(api_flavor.is_public)
        self.assertEqual(1, api_flavor.rxtx_factor)
        novaclient.flavors.create.assert_called_once_with(
            flavor.name, flavor.ram, flavor.vcpus, flavor.disk,
            flavorid='auto', ephemeral=0, swap=0, is_public=True,
            rxtx_factor=1)

    def test_flavor_delete(self):
        flavor = self.flavors.first()
        novaclient = self.stub_novaclient()
        novaclient.flavors.delete.return_value = None

        api_val = api.nova.flavor_delete(self.request, flavor.id)

        self.assertIsNone(api_val)
        novaclient.flavors.delete.assert_called_once_with(flavor.id)

    def test_flavor_access_list(self):
        flavor_access = self.flavor_access.list()
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]
        novaclient = self.stub_novaclient()
        novaclient.flavor_access.list.return_value = flavor_access

        api_flavor_access = api.nova.flavor_access_list(self.request, flavor)

        self.assertEqual(len(flavor_access), len(api_flavor_access))
        for access in api_flavor_access:
            self.assertIsInstance(access, nova_flavor_access.FlavorAccess)
            self.assertEqual(access.flavor_id, flavor.id)
        novaclient.flavor_access.list.assert_called_once_with(flavor=flavor)

    def test_add_tenant_to_flavor(self):
        flavor_access = [self.flavor_access.first()]
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]
        tenant = [t for t in self.tenants.list() if t.id ==
                  flavor_access[0].tenant_id][0]
        novaclient = self.stub_novaclient()
        novaclient.flavor_access.add_tenant_access.return_value = flavor_access

        api_flavor_access = api.nova.add_tenant_to_flavor(self.request,
                                                          flavor,
                                                          tenant)

        self.assertIsInstance(api_flavor_access, list)
        self.assertEqual(len(flavor_access), len(api_flavor_access))
        for access in api_flavor_access:
            self.assertEqual(access.flavor_id, flavor.id)
            self.assertEqual(access.tenant_id, tenant.id)
        novaclient.flavor_access.add_tenant_access.assert_called_once_with(
            flavor=flavor, tenant=tenant)

    def test_remove_tenant_from_flavor(self):
        flavor_access = [self.flavor_access.first()]
        flavor = [f for f in self.flavors.list() if f.id ==
                  flavor_access[0].flavor_id][0]
        tenant = [t for t in self.tenants.list() if t.id ==
                  flavor_access[0].tenant_id][0]
        novaclient = self.stub_novaclient()
        novaclient.flavor_access.remove_tenant_access.return_value = []

        api_val = api.nova.remove_tenant_from_flavor(self.request,
                                                     flavor,
                                                     tenant)

        self.assertEqual(len(api_val), len([]))
        self.assertIsInstance(api_val, list)
        novaclient.flavor_access.remove_tenant_access.assert_called_once_with(
            flavor=flavor, tenant=tenant)

    def test_server_group_list(self):
        server_groups = self.server_groups.list()
        novaclient = self.stub_novaclient()
        novaclient.server_groups.list.return_value = server_groups

        ret_val = api.nova.server_group_list(self.request)

        self.assertIsInstance(ret_val, list)
        self.assertEqual(len(ret_val), len(server_groups))
        novaclient.server_groups.list.assert_called_once_with()
| 40.99226 | 79 | 0.629848 |
from __future__ import absolute_import
from django.conf import settings
from django.test.utils import override_settings
import mock
from novaclient import api_versions
from novaclient import exceptions as nova_exceptions
from novaclient.v2 import flavor_access as nova_flavor_access
from novaclient.v2 import servers
from horizon import exceptions as horizon_exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class ServerWrapperTests(test.TestCase):
    """Behavior of the ``api.nova.Server`` proxy object."""

    use_mox = False

    def test_get_base_attribute(self):
        first = self.servers.first()
        # Plain attributes pass straight through to the wrapped object.
        self.assertEqual(first.id,
                         api.nova.Server(first, self.request).id)

    @mock.patch.object(api.glance, 'image_get')
    def test_image_name(self, mock_image_get):
        img = self.images.first()
        mock_image_get.return_value = img
        srv = api.nova.Server(self.servers.first(), self.request)
        # image_name is resolved via a glance lookup for the server's image.
        self.assertEqual(img.name, srv.image_name)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(), img.id)

    @mock.patch.object(api.glance, 'image_get')
    def test_image_name_no_glance_service(self, mock_image_get):
        failure = horizon_exceptions.ServiceCatalogException('image')
        mock_image_get.side_effect = failure
        srv = api.nova.Server(self.servers.first(), self.request)
        # A missing image service yields None rather than an exception.
        self.assertIsNone(srv.image_name)
        mock_image_get.assert_called_once_with(test.IsHttpRequest(),
                                               srv.image['id'])
class ComputeApiTests(test.APIMockTestCase):
def _mock_current_version(self, mock_novaclient, version,
min_version=None):
ver = mock.Mock()
ver.min_version = min_version or '2.1'
ver.version = version
mock_novaclient.versions.get_current.return_value = ver
self.novaclient.api_version = api_versions.APIVersion(version)
def test_server_reboot(self):
server = self.servers.first()
HARDNESS = servers.REBOOT_HARD
novaclient = self.stub_novaclient()
novaclient.servers.reboot.return_value = None
ret_val = api.nova.server_reboot(self.request, server.id)
self.assertIsNone(ret_val)
novaclient.servers.reboot.assert_called_once_with(
server.id, HARDNESS)
def test_server_soft_reboot(self):
server = self.servers.first()
HARDNESS = servers.REBOOT_SOFT
novaclient = self.stub_novaclient()
novaclient.servers.reboot.return_value = None
ret_val = api.nova.server_reboot(self.request, server.id, HARDNESS)
self.assertIsNone(ret_val)
novaclient.servers.reboot.assert_called_once_with(
server.id, HARDNESS)
def test_server_vnc_console(self):
server = self.servers.first()
console = self.servers.vnc_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers.get_vnc_console.return_value = console
ret_val = api.nova.server_vnc_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.VNCConsole)
novaclient.servers.get_vnc_console.assert_called_once_with(
server.id, console_type)
def test_server_spice_console(self):
server = self.servers.first()
console = self.servers.spice_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers.get_spice_console.return_value = console
ret_val = api.nova.server_spice_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.SPICEConsole)
novaclient.servers.get_spice_console.assert_called_once_with(
server.id, console_type)
def test_server_rdp_console(self):
server = self.servers.first()
console = self.servers.rdp_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers.get_rdp_console.return_value = console
ret_val = api.nova.server_rdp_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.RDPConsole)
novaclient.servers.get_rdp_console.assert_called_once_with(
server.id, console_type)
def test_server_mks_console(self):
server = self.servers.first()
console = self.servers.mks_console_data
console_type = console["remote_console"]["type"]
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.53')
novaclient.servers.get_mks_console.return_value = console
ret_val = api.nova.server_mks_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.MKSConsole)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.get_mks_console.assert_called_once_with(
server.id, console_type)
def test_server_list(self):
servers = self.servers.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.40')
novaclient.servers.list.return_value = servers
ret_val, has_more = api.nova.server_list(
self.request,
search_opts={'all_tenants': True})
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.list.assert_called_once_with(
True, {'all_tenants': True})
def test_server_list_pagination(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
servers = self.servers.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.45')
novaclient.servers.list.return_value = servers
ret_val, has_more = api.nova.server_list(self.request,
{'marker': None,
'paginate': True,
'all_tenants': True})
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
self.assertFalse(has_more)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.list.assert_called_once_with(
True,
{'all_tenants': True,
'marker': None,
'limit': page_size + 1})
@override_settings(API_RESULT_PAGE_SIZE=1)
def test_server_list_pagination_more(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
servers = self.servers.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.45')
novaclient.servers.list.return_value = servers[:page_size + 1]
ret_val, has_more = api.nova.server_list(self.request,
{'marker': None,
'paginate': True,
'all_tenants': True})
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
self.assertEqual(page_size, len(ret_val))
self.assertTrue(has_more)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.list.assert_called_once_with(
True,
{'all_tenants': True,
'marker': None,
'limit': page_size + 1})
def test_usage_get(self):
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.1')
novaclient.usages.get.return_value = self.usages.first()
ret_val = api.nova.usage_get(self.request, self.tenant.id,
'start', 'end')
self.assertIsInstance(ret_val, api.nova.NovaUsage)
novaclient.versions.get_current.assert_called_once_with()
novaclient.usage.get.assert_called_once_with(
self.tenant.id, 'start', 'end')
def test_usage_get_paginated(self):
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.40')
novaclient.usage.get.side_effect = [
self.usages.first(),
{},
]
ret_val = api.nova.usage_get(self.request, self.tenant.id,
'start', 'end')
self.assertIsInstance(ret_val, api.nova.NovaUsage)
novaclient.versions.get_current.assert_called_once_with()
novaclient.usage.get.assert_has_calls([
mock.call(self.tenant.id, 'start', 'end'),
mock.call(self.tenant.id, 'start', 'end',
marker=u'063cf7f3-ded1-4297-bc4c-31eae876cc93'),
])
def test_usage_list(self):
usages = self.usages.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.1')
novaclient.usage.list.return_value = usages
ret_val = api.nova.usage_list(self.request, 'start', 'end')
for usage in ret_val:
self.assertIsInstance(usage, api.nova.NovaUsage)
novaclient.versions.get_current.assert_called_once_with()
novaclient.usage.list.assert_called_once_with('start', 'end', True)
def test_usage_list_paginated(self):
usages = self.usages.list()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.40')
novaclient.usage.list.side_effect = [
usages,
{},
]
ret_val = api.nova.usage_list(self.request, 'start', 'end')
for usage in ret_val:
self.assertIsInstance(usage, api.nova.NovaUsage)
novaclient.versions.get_current.assert_called_once_with()
novaclient.usage.list.assert_has_calls([
mock.call('start', 'end', True),
mock.call('start', 'end', True,
marker=u'063cf7f3-ded1-4297-bc4c-31eae876cc93'),
])
def test_server_get(self):
server = self.servers.first()
novaclient = self.stub_novaclient()
self._mock_current_version(novaclient, '2.45')
novaclient.servers.get.return_value = server
ret_val = api.nova.server_get(self.request, server.id)
self.assertIsInstance(ret_val, api.nova.Server)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.get.assert_called_once_with(server.id)
def test_server_metadata_update(self):
server = self.servers.first()
metadata = {'foo': 'bar'}
novaclient = self.stub_novaclient()
novaclient.servers.set_meta.return_value = None
ret_val = api.nova.server_metadata_update(self.request,
server.id,
metadata)
self.assertIsNone(ret_val)
novaclient.servers.set_meta.assert_called_once_with(server.id,
metadata)
def test_server_metadata_delete(self):
server = self.servers.first()
keys = ['a', 'b']
novaclient = self.stub_novaclient()
novaclient.servers.delete_meta.return_value = None
ret_val = api.nova.server_metadata_delete(self.request,
server.id,
keys)
self.assertIsNone(ret_val)
novaclient.servers.delete_meta.assert_called_once_with(server.id, keys)
def _test_absolute_limits(self, values, expected_results):
limits = mock.Mock()
limits.absolute = []
for key, val in values.items():
limit = mock.Mock()
limit.name = key
limit.value = val
limits.absolute.append(limit)
novaclient = self.stub_novaclient()
novaclient.limits.get.return_value = limits
ret_val = api.nova.tenant_absolute_limits(self.request, reserved=True)
for key in expected_results.keys():
self.assertEqual(expected_results[key], ret_val[key])
novaclient.limits.get.assert_called_once_with(reserved=True,
tenant_id=None)
def test_absolute_limits_handle_unlimited(self):
values = {"maxTotalCores": -1, "maxTotalInstances": 10}
expected_results = {"maxTotalCores": float("inf"),
"maxTotalInstances": 10}
self._test_absolute_limits(values, expected_results)
def test_absolute_limits_negative_used_workaround(self):
values = {"maxTotalCores": -1,
"maxTotalInstances": 10,
"totalInstancesUsed": -1,
"totalCoresUsed": -1,
"totalRAMUsed": -2048,
"totalSecurityGroupsUsed": 1,
"totalFloatingIpsUsed": 0,
}
expected_results = {"maxTotalCores": float("inf"),
"maxTotalInstances": 10,
"totalInstancesUsed": 0,
"totalCoresUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 1,
"totalFloatingIpsUsed": 0,
}
self._test_absolute_limits(values, expected_results)
def test_cold_migrate_host_succeed(self):
hypervisor = self.hypervisors.first()
novaclient = self.stub_novaclient()
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.migrate.return_value = None
ret_val = api.nova.migrate_host(self.request, "host", False, True,
True)
self.assertTrue(ret_val)
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.migrate.assert_called_once_with('test_uuid')
def test_cold_migrate_host_fails(self):
hypervisor = self.hypervisors.first()
novaclient = self.stub_novaclient()
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.migrate.side_effect = \
nova_exceptions.ClientException(404)
self.assertRaises(nova_exceptions.ClientException,
api.nova.migrate_host,
self.request, "host", False, True, True)
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.migrate.assert_called_once_with('test_uuid')
def test_live_migrate_host_with_active_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.first()
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
self._mock_current_version(novaclient, '2.45')
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.get.return_value = server
novaclient.servers.live_migrate.return_value = None
ret_val = api.nova.migrate_host(self.request, "host", True, True,
True)
self.assertTrue(ret_val)
novaclient.versions.get_current.assert_called_once_with()
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.get.assert_called_once_with(server_uuid)
novaclient.servers.live_migrate.assert_called_once_with(
server_uuid, None, True, True)
def test_live_migrate_host_with_paused_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.list()[3]
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
self._mock_current_version(novaclient, '2.45')
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.get.return_value = server
novaclient.servers.live_migrate.return_value = None
ret_val = api.nova.migrate_host(self.request, "host", True, True, True)
self.assertTrue(ret_val)
novaclient.versions.get_current.assert_called_once_with()
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.get.assert_called_once_with(server_uuid)
novaclient.servers.live_migrate.assert_called_once_with(
server_uuid, None, True, True)
def test_live_migrate_host_without_running_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.list()[1]
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
self._mock_current_version(novaclient, '2.45')
novaclient.hypervisors.search.return_value = [hypervisor]
novaclient.servers.get.return_value = server
novaclient.servers.migrate.return_value = None
ret_val = api.nova.migrate_host(self.request, "host", True, True, True)
self.assertTrue(ret_val)
novaclient.versions.get_current.assert_called_once_with()
novaclient.hypervisors.search.assert_called_once_with('host', True)
novaclient.servers.get.assert_called_once_with(server_uuid)
novaclient.servers.migrate.assert_called_once_with(server_uuid)
def test_flavor_list_no_extras(self):
flavors = self.flavors.list()
novaclient = self.stub_novaclient()
novaclient.flavors.list.return_value = flavors
api_flavors = api.nova.flavor_list(self.request)
self.assertEqual(len(flavors), len(api_flavors))
novaclient.flavors.list.assert_called_once_with(is_public=True)
def test_flavor_get_no_extras(self):
flavor = self.flavors.list()[1]
novaclient = self.stub_novaclient()
novaclient.flavors.get.return_value = flavor
api_flavor = api.nova.flavor_get(self.request, flavor.id)
self.assertEqual(api_flavor.id, flavor.id)
novaclient.flavors.get.assert_called_once_with(flavor.id)
def _test_flavor_list_paged(self, reversed_order=False, paginate=True):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
flavors = self.flavors.list()
order = 'asc' if reversed_order else 'desc'
novaclient = self.stub_novaclient()
novaclient.flavors.list.return_value = flavors
api_flavors, has_more, has_prev = api.nova.flavor_list_paged(
self.request, True, False, None, paginate=paginate,
reversed_order=reversed_order)
for flavor in api_flavors:
self.assertIsInstance(flavor, type(flavors[0]))
self.assertFalse(has_more)
self.assertFalse(has_prev)
if paginate:
novaclient.flavors.list.assert_called_once_with(
is_public=True, marker=None, limit=page_size + 1,
sort_key='name', sort_dir=order)
else:
novaclient.flavors.list.assert_called_once_with(
is_public=True)
@override_settings(API_RESULT_PAGE_SIZE=1)
def test_flavor_list_pagination_more_and_prev(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
flavors = self.flavors.list()
marker = flavors[0].id
novaclient = self.stub_novaclient()
novaclient.flavors.list.return_value = flavors[1:page_size + 2]
api_flavors, has_more, has_prev = api.nova\
.flavor_list_paged(
self.request,
True,
False,
marker,
paginate=True)
for flavor in api_flavors:
self.assertIsInstance(flavor, type(flavors[0]))
self.assertEqual(page_size, len(api_flavors))
self.assertTrue(has_more)
self.assertTrue(has_prev)
novaclient.flavors.list.assert_called_once_with(
is_public=True, marker=marker, limit=page_size + 1,
sort_key='name', sort_dir='desc')
    def test_flavor_list_paged_default_order(self):
        # Default call: paginated, descending sort order.
        self._test_flavor_list_paged()
    def test_flavor_list_paged_reversed_order(self):
        # reversed_order=True requests ascending sort order.
        self._test_flavor_list_paged(reversed_order=True)
    def test_flavor_list_paged_paginate_false(self):
        # paginate=False: no marker/limit/sort arguments are sent.
        self._test_flavor_list_paged(paginate=False)
def test_flavor_create(self):
flavor = self.flavors.first()
novaclient = self.stub_novaclient()
novaclient.flavors.create.return_value = flavor
api_flavor = api.nova.flavor_create(self.request,
flavor.name,
flavor.ram,
flavor.vcpus,
flavor.disk)
self.assertIsInstance(api_flavor, type(flavor))
self.assertEqual(flavor.name, api_flavor.name)
self.assertEqual(flavor.ram, api_flavor.ram)
self.assertEqual(flavor.vcpus, api_flavor.vcpus)
self.assertEqual(flavor.disk, api_flavor.disk)
self.assertEqual(0, api_flavor.ephemeral)
self.assertEqual(0, api_flavor.swap)
self.assertTrue(api_flavor.is_public)
self.assertEqual(1, api_flavor.rxtx_factor)
novaclient.flavors.create.assert_called_once_with(
flavor.name, flavor.ram, flavor.vcpus, flavor.disk,
flavorid='auto', ephemeral=0, swap=0, is_public=True,
rxtx_factor=1)
def test_flavor_delete(self):
flavor = self.flavors.first()
novaclient = self.stub_novaclient()
novaclient.flavors.delete.return_value = None
api_val = api.nova.flavor_delete(self.request, flavor.id)
self.assertIsNone(api_val)
novaclient.flavors.delete.assert_called_once_with(flavor.id)
def test_flavor_access_list(self):
flavor_access = self.flavor_access.list()
flavor = [f for f in self.flavors.list() if f.id ==
flavor_access[0].flavor_id][0]
novaclient = self.stub_novaclient()
novaclient.flavor_access.list.return_value = flavor_access
api_flavor_access = api.nova.flavor_access_list(self.request, flavor)
self.assertEqual(len(flavor_access), len(api_flavor_access))
for access in api_flavor_access:
self.assertIsInstance(access, nova_flavor_access.FlavorAccess)
self.assertEqual(access.flavor_id, flavor.id)
novaclient.flavor_access.list.assert_called_once_with(flavor=flavor)
def test_add_tenant_to_flavor(self):
flavor_access = [self.flavor_access.first()]
flavor = [f for f in self.flavors.list() if f.id ==
flavor_access[0].flavor_id][0]
tenant = [t for t in self.tenants.list() if t.id ==
flavor_access[0].tenant_id][0]
novaclient = self.stub_novaclient()
novaclient.flavor_access.add_tenant_access.return_value = flavor_access
api_flavor_access = api.nova.add_tenant_to_flavor(self.request,
flavor,
tenant)
self.assertIsInstance(api_flavor_access, list)
self.assertEqual(len(flavor_access), len(api_flavor_access))
for access in api_flavor_access:
self.assertEqual(access.flavor_id, flavor.id)
self.assertEqual(access.tenant_id, tenant.id)
novaclient.flavor_access.add_tenant_access.assert_called_once_with(
flavor=flavor, tenant=tenant)
def test_remove_tenant_from_flavor(self):
flavor_access = [self.flavor_access.first()]
flavor = [f for f in self.flavors.list() if f.id ==
flavor_access[0].flavor_id][0]
tenant = [t for t in self.tenants.list() if t.id ==
flavor_access[0].tenant_id][0]
novaclient = self.stub_novaclient()
novaclient.flavor_access.remove_tenant_access.return_value = []
api_val = api.nova.remove_tenant_from_flavor(self.request,
flavor,
tenant)
self.assertEqual(len(api_val), len([]))
self.assertIsInstance(api_val, list)
novaclient.flavor_access.remove_tenant_access.assert_called_once_with(
flavor=flavor, tenant=tenant)
def test_server_group_list(self):
server_groups = self.server_groups.list()
novaclient = self.stub_novaclient()
novaclient.server_groups.list.return_value = server_groups
ret_val = api.nova.server_group_list(self.request)
self.assertIsInstance(ret_val, list)
self.assertEqual(len(ret_val), len(server_groups))
novaclient.server_groups.list.assert_called_once_with()
| true | true |
f7310b7ead9624ea504e64c406cc2aff15d3c26e | 475 | py | Python | File3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | 1 | 2021-06-07T07:55:28.000Z | 2021-06-07T07:55:28.000Z | File3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | File3.py | PRASAD-DANGARE/PYTHON | 36214f7dc3762d327e5a29e40752edeb098249c8 | [
"MIT"
] | null | null | null | '''
Function Name : main()
Description : How To Open File & Read The Data Using Open, Read
Function Date : 15 Mar 2021
Function Author : Prasad Dangare
Input : Int
Output : Int
'''
def main():
    """Prompt for a file name and print that file's contents."""
    name = input("Enter the file name that you want to Read : ")
    # Open for reading only; 'with' guarantees the handle is closed
    # even if reading fails (the original version leaked the handle
    # and mislabeled the read-mode open as "create new file").
    with open(name, "r") as fobj:
        print("Data from file is ")
        print(fobj.read())


if __name__ == "__main__":
    main()
| 21.590909 | 70 | 0.543158 |
def main():
name = input("Enter the file name that you want to Read : ")
fobj = open(name,"r")
print("Data from file is ")
print(fobj.read())
if __name__ == "__main__":
main()
| true | true |
f7310b85b49a3410f4d4de81bf967f2752594b51 | 2,260 | py | Python | src/recording_script_generator/app/sentence_splitting.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | src/recording_script_generator/app/sentence_splitting.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | src/recording_script_generator/app/sentence_splitting.py | stefantaubert/recording-script-generator | 01cdcd4b85ed7f245f4bb8535d870c04472746c9 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Optional
from recording_script_generator.app.helper import (
raise_error_if_directory_exists_and_not_overwrite,
raise_error_if_directory_not_exists)
from recording_script_generator.app.io import (load_reading_passages,
load_reading_passages_paths,
load_selection,
save_reading_passages,
save_reading_passages_paths,
save_representations,
save_selection)
from recording_script_generator.core.sentence_splitting import main_inplace
from recording_script_generator.globals import (DEFAULT_CHUNKSIZE_FILES,
DEFAULT_MAXTASKSPERCHILD,
DEFAULT_N_JOBS,
DEFAULT_OVERWRITE)
def app_split_sentences(working_directory: Path, custom_output_directory: Optional[Path] = None, n_jobs: int = DEFAULT_N_JOBS, maxtasksperchild: Optional[int] = DEFAULT_MAXTASKSPERCHILD, chunksize: Optional[int] = DEFAULT_CHUNKSIZE_FILES, overwrite: bool = DEFAULT_OVERWRITE):
  """Split the reading passages of a project into sentences.

  Loads selection, reading passages and their paths from
  ``working_directory``, splits them in place (parallelised over
  ``n_jobs`` workers), and saves the results back to
  ``working_directory`` — or to ``custom_output_directory`` if given
  (which must not already exist unless ``overwrite`` is set).
  """
  if raise_error_if_directory_not_exists(working_directory):
    return

  # Decide where results are written.
  if custom_output_directory is None:
    target_directory = working_directory
  else:
    if raise_error_if_directory_exists_and_not_overwrite(custom_output_directory, overwrite):
      return
    target_directory = custom_output_directory

  selection = load_selection(working_directory)
  reading_passages = load_reading_passages(working_directory)
  reading_passages_paths = load_reading_passages_paths(working_directory)

  representations = main_inplace(selection, reading_passages, reading_passages_paths,
                                 n_jobs, maxtasksperchild, chunksize)

  save_reading_passages(target_directory, reading_passages)
  save_selection(target_directory, selection)
  save_reading_passages_paths(target_directory, reading_passages_paths)
  save_representations(target_directory, representations)
  # TODO maybe also remove unused paths from paths
| 52.55814 | 276 | 0.684071 | from pathlib import Path
from typing import Optional
from recording_script_generator.app.helper import (
raise_error_if_directory_exists_and_not_overwrite,
raise_error_if_directory_not_exists)
from recording_script_generator.app.io import (load_reading_passages,
load_reading_passages_paths,
load_selection,
save_reading_passages,
save_reading_passages_paths,
save_representations,
save_selection)
from recording_script_generator.core.sentence_splitting import main_inplace
from recording_script_generator.globals import (DEFAULT_CHUNKSIZE_FILES,
DEFAULT_MAXTASKSPERCHILD,
DEFAULT_N_JOBS,
DEFAULT_OVERWRITE)
def app_split_sentences(working_directory: Path, custom_output_directory: Optional[Path] = None, n_jobs: int = DEFAULT_N_JOBS, maxtasksperchild: Optional[int] = DEFAULT_MAXTASKSPERCHILD, chunksize: Optional[int] = DEFAULT_CHUNKSIZE_FILES, overwrite: bool = DEFAULT_OVERWRITE):
if raise_error_if_directory_not_exists(working_directory):
return
output_directory = working_directory
if custom_output_directory is not None:
if raise_error_if_directory_exists_and_not_overwrite(custom_output_directory, overwrite):
return
output_directory = custom_output_directory
selection = load_selection(working_directory)
reading_passages = load_reading_passages(working_directory)
reading_passages_paths = load_reading_passages_paths(working_directory)
representations = main_inplace(selection, reading_passages, reading_passages_paths,
n_jobs, maxtasksperchild, chunksize)
save_reading_passages(output_directory, reading_passages)
save_selection(output_directory, selection)
save_reading_passages_paths(output_directory, reading_passages_paths)
save_representations(output_directory, representations)
| true | true |
f7310bc599c78dfa6a7cf3aaae17e6e710d2caae | 4,872 | py | Python | teste/polls/tests.py | DarlanGabriel/djangoproject | e2260edcbcb8a8cdf2e4e60f46d3ab3fd6e035c3 | [
"Apache-2.0"
] | null | null | null | teste/polls/tests.py | DarlanGabriel/djangoproject | e2260edcbcb8a8cdf2e4e60f46d3ab3fd6e035c3 | [
"Apache-2.0"
] | null | null | null | teste/polls/tests.py | DarlanGabriel/djangoproject | e2260edcbcb8a8cdf2e4e60f46d3ab3fd6e035c3 | [
"Apache-2.0"
] | null | null | null | import datetime
from django.test import TestCase
from django.utils import timezone
from .models import Question
from django.urls import reverse
class QuestionModelTests(TestCase):
    """Unit tests for Question.was_published_recently()."""

    def test_was_published_recently_with_future_question(self):
        """A question dated in the future is not "recent"."""
        future_time = timezone.now() + datetime.timedelta(days=30)
        question = Question(pub_date=future_time)
        self.assertIs(question.was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """A question older than one day is not "recent"."""
        old_time = timezone.now() - datetime.timedelta(days=1, seconds=1)
        question = Question(pub_date=old_time)
        self.assertIs(question.was_published_recently(), False)

    def test_was_published_recently_with_recent_question(self):
        """A question published within the last day is "recent"."""
        recent_time = timezone.now() - datetime.timedelta(
            hours=23, minutes=59, seconds=59)
        question = Question(pub_date=recent_time)
        self.assertIs(question.was_published_recently(), True)
def create_question(question_text, days):
    """Create and save a question offset ``days`` from now.

    Negative ``days`` places the question in the past; positive places
    it in the future (i.e. not yet published).
    """
    pub_date = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text,
                                   pub_date=pub_date)
class QuestionIndexViewTests(TestCase):
    """Tests for the poll index view's published/unpublished filtering."""

    def _get_index(self):
        """Fetch the poll index page (shared by every test here)."""
        return self.client.get(reverse('polls:index'))

    def test_no_questions(self):
        """An empty database yields the "no polls" message."""
        resp = self._get_index()
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, "No polls are available.")
        self.assertQuerysetEqual(resp.context['latest_question_list'], [])

    def test_past_question(self):
        """A question published in the past appears on the index."""
        create_question(question_text="Past question.", days=-30)
        resp = self._get_index()
        self.assertQuerysetEqual(
            resp.context['latest_question_list'],
            ['<Question: Past question.>'])

    def test_future_question(self):
        """A question scheduled for the future is hidden."""
        create_question(question_text="Future question.", days=30)
        resp = self._get_index()
        self.assertContains(resp, "No polls are available.")
        self.assertQuerysetEqual(resp.context['latest_question_list'], [])

    def test_future_question_and_past_question(self):
        """Only the past question shows when both kinds exist."""
        create_question(question_text="Past question.", days=-30)
        create_question(question_text="Future question.", days=30)
        resp = self._get_index()
        self.assertQuerysetEqual(
            resp.context['latest_question_list'],
            ['<Question: Past question.>'])

    def test_two_past_questions(self):
        """Multiple past questions are listed, newest first."""
        create_question(question_text="Past question 1.", days=-30)
        create_question(question_text="Past question 2.", days=-5)
        resp = self._get_index()
        self.assertQuerysetEqual(
            resp.context['latest_question_list'],
            ['<Question: Past question 2.>', '<Question: Past question 1.>'])
class QuestionDetailViewTests(TestCase):
    """Tests for the question detail view's publication gating."""

    def test_future_question(self):
        """Detail pages for unpublished (future) questions return 404."""
        question = create_question(question_text='Future question.', days=5)
        detail_url = reverse('polls:detail', args=(question.id,))
        self.assertEqual(self.client.get(detail_url).status_code, 404)

    def test_past_question(self):
        """Detail pages for published questions show the question text."""
        question = create_question(question_text='Past Question.', days=-5)
        detail_url = reverse('polls:detail', args=(question.id,))
        self.assertContains(self.client.get(detail_url),
                            question.question_text)
| 37.19084 | 84 | 0.66564 | import datetime
from django.test import TestCase
from django.utils import timezone
from .models import Question
from django.urls import reverse
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() returns False for questions whose pub_date
is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() returns True for questions whose pub_date
is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
"""
Create a question with the given `question_text` and published the
given number of `days` offset to now (negative for questions published
in the past, positive for questions that have yet to be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text, pub_date=time)
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
| false | true |
f7310ca10c88fc71c44d09d18e1abc98736f1dc9 | 10,685 | py | Python | pokemon_data.py | aroxby/pk-stadium-decoder | 71f23bcc7035fcd763e69372387becc1b4744fd0 | [
"Unlicense"
] | null | null | null | pokemon_data.py | aroxby/pk-stadium-decoder | 71f23bcc7035fcd763e69372387becc1b4744fd0 | [
"Unlicense"
] | null | null | null | pokemon_data.py | aroxby/pk-stadium-decoder | 71f23bcc7035fcd763e69372387becc1b4744fd0 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
from enum import Enum
class Type(Enum):
NORMAL = 0
FIGHTING = 1
FLYING = 2
POISON = 3
GROUND = 4
ROCK = 5
BIRD = 6
BUG = 7
GHOST = 8
FIRE = 20
WATER = 21
GRASS = 22
ELECTRIC = 23
PSYCHIC = 24
ICE = 25
DRAGON = 26
def __str__(self):
MAPPING = {
self.BIRD: 'Bird',
self.BUG: 'Bug',
self.DRAGON: 'Dragon',
self.ELECTRIC: 'Electric',
self.FIGHTING: 'Fighting',
self.FIRE: 'Fire',
self.FLYING: 'Flying',
self.GHOST: 'Ghost',
self.GRASS: 'Grass',
self.GROUND: 'Ground',
self.ICE: 'Ice',
self.NORMAL: 'Normal',
self.POISON: 'Poison',
self.PSYCHIC: 'Psychic',
self.ROCK: 'Rock',
self.WATER: 'Water',
}
name = MAPPING.get(self, f'<Type {self.value}>')
return name
class Move(Enum):
    """A Pokemon move, keyed by its internal id (0 = no move)."""
    NONE = 0
    POUND = 1
    KARATECHOP = 2
    DOUBLESLAP = 3
    COMETPUNCH = 4
    MEGAPUNCH = 5
    PAYDAY = 6
    FIREPUNCH = 7
    ICEPUNCH = 8
    THUNDERPUNCH = 9
    SCRATCH = 10
    VICEGRIP = 11
    GUILLOTINE = 12
    RAZORWIND = 13
    SWORDSDANCE = 14
    CUT = 15
    GUST = 16
    WINGATTACK = 17
    WHIRLWIND = 18
    FLY = 19
    BIND = 20
    SLAM = 21
    VINEWHIP = 22
    STOMP = 23
    DOUBLEKICK = 24
    MEGAKICK = 25
    JUMPKICK = 26
    ROLLINGKICK = 27
    SANDATTACK = 28
    HEADBUTT = 29
    HORNATTACK = 30
    FURYATTACK = 31
    HORNDRILL = 32
    TACKLE = 33
    BODYSLAM = 34
    WRAP = 35
    TAKEDOWN = 36
    THRASH = 37
    DOUBLEEDGE = 38
    TAILWHIP = 39
    POISONSTING = 40
    TWINEEDLE = 41
    PINMISSILE = 42
    LEER = 43
    BITE = 44
    GROWL = 45
    ROAR = 46
    SING = 47
    SUPERSONIC = 48
    SONICBOOM = 49
    DISABLE = 50
    ACID = 51
    EMBER = 52
    FLAMETHROWER = 53
    MIST = 54
    WATERGUN = 55
    HYDROPUMP = 56
    SURF = 57
    ICEBEAM = 58
    BLIZZARD = 59
    PSYBEAM = 60
    BUBBLEBEAM = 61
    AURORABEAM = 62
    HYPERBEAM = 63
    PECK = 64
    DRILLPECK = 65
    SUBMISSION = 66
    LOWKICK = 67
    COUNTER = 68
    SEISMICTOSS = 69
    STRENGTH = 70
    ABSORB = 71
    MEGADRAIN = 72
    LEECHSEED = 73
    GROWTH = 74
    RAZORLEAF = 75
    SOLARBEAM = 76
    POISONPOWDER = 77
    STUNSPORE = 78
    SLEEPPOWDER = 79
    PETALDANCE = 80
    STRINGSHOT = 81
    DRAGONRAGE = 82
    FIRESPIN = 83
    THUNDERSHOCK = 84
    THUNDERBOLT = 85
    THUNDERWAVE = 86
    THUNDER = 87
    ROCKTHROW = 88
    EARTHQUAKE = 89
    FISSURE = 90
    DIG = 91
    TOXIC = 92
    CONFUSION = 93
    PSYCHIC = 94
    HYPNOSIS = 95
    MEDITATE = 96
    AGILITY = 97
    QUICKATTACK = 98
    RAGE = 99
    TELEPORT = 100
    NIGHTSHADE = 101
    MIMIC = 102
    SCREECH = 103
    DOUBLETEAM = 104
    RECOVER = 105
    HARDEN = 106
    MINIMIZE = 107
    SMOKESCREEN = 108
    CONFUSERAY = 109
    WITHDRAW = 110
    DEFENSECURL = 111
    BARRIER = 112
    LIGHTSCREEN = 113
    HAZE = 114
    REFLECT = 115
    FOCUSENERGY = 116
    BIDE = 117
    METRONOME = 118
    MIRRORMOVE = 119
    SELFDESTRUCT = 120
    EGGBOMB = 121
    LICK = 122
    SMOG = 123
    SLUDGE = 124
    BONECLUB = 125
    FIREBLAST = 126
    WATERFALL = 127
    CLAMP = 128
    SWIFT = 129
    SKULLBASH = 130
    SPIKECANNON = 131
    CONSTRICT = 132
    AMNESIA = 133
    KINESIS = 134
    SOFTBOILED = 135
    HIJUMPKICK = 136
    GLARE = 137
    DREAMEATER = 138
    POISONGAS = 139
    BARRAGE = 140
    LEECHLIFE = 141
    LOVELYKISS = 142
    SKYATTACK = 143
    TRANSFORM = 144
    BUBBLE = 145
    DIZZYPUNCH = 146
    SPORE = 147
    FLASH = 148
    PSYWAVE = 149
    SPLASH = 150
    ACIDARMOR = 151
    CRABHAMMER = 152
    EXPLOSION = 153
    FURYSWIPES = 154
    BONEMERANG = 155
    REST = 156
    ROCKSLIDE = 157
    HYPERFANG = 158
    SHARPEN = 159
    CONVERSION = 160
    TRIATTACK = 161
    SUPERFANG = 162
    SLASH = 163
    SUBSTITUTE = 164
    STRUGGLE = 165

    def __str__(self):
        # Display names are looked up in a module-level table built
        # once at import time (the original rebuilt a 166-entry dict
        # literal on every call).
        return _MOVE_NAMES.get(self, f'<Move {self.value}>')


# Human-readable display names for every move.  Many are multi-word or
# hyphenated and therefore cannot be derived from the member names.
_MOVE_NAMES = {
    Move.NONE: '-',
    Move.ABSORB: 'Absorb',
    Move.ACIDARMOR: 'Acid Armor',
    Move.ACID: 'Acid',
    Move.AGILITY: 'Agility',
    Move.AMNESIA: 'Amnesia',
    Move.AURORABEAM: 'Aurora Beam',
    Move.BARRAGE: 'Barrage',
    Move.BARRIER: 'Barrier',
    Move.BIDE: 'Bide',
    Move.BIND: 'Bind',
    Move.BITE: 'Bite',
    Move.BLIZZARD: 'Blizzard',
    Move.BODYSLAM: 'Body Slam',
    Move.BONECLUB: 'Bone Club',
    Move.BONEMERANG: 'Bonemerang',
    Move.BUBBLE: 'Bubble',
    Move.BUBBLEBEAM: 'Bubblebeam',
    Move.CLAMP: 'Clamp',
    Move.COMETPUNCH: 'Comet Punch',
    Move.CONFUSERAY: 'Confuse Ray',
    Move.CONFUSION: 'Confusion',
    Move.CONSTRICT: 'Constrict',
    Move.CONVERSION: 'Conversion',
    Move.COUNTER: 'Counter',
    Move.CRABHAMMER: 'Crabhammer',
    Move.CUT: 'Cut',
    Move.DEFENSECURL: 'Defense Curl',
    Move.DIG: 'Dig',
    Move.DISABLE: 'Disable',
    Move.DIZZYPUNCH: 'Dizzy Punch',
    Move.DOUBLEKICK: 'Double Kick',
    Move.DOUBLETEAM: 'Double Team',
    Move.DOUBLEEDGE: 'Double-Edge',
    Move.DOUBLESLAP: 'Doubleslap',
    Move.DRAGONRAGE: 'Dragon Rage',
    Move.DREAMEATER: 'Dream Eater',
    Move.DRILLPECK: 'Drill Peck',
    Move.EARTHQUAKE: 'Earthquake',
    Move.EGGBOMB: 'Egg Bomb',
    Move.EMBER: 'Ember',
    Move.EXPLOSION: 'Explosion',
    Move.FIREBLAST: 'Fire Blast',
    Move.FIREPUNCH: 'Fire Punch',
    Move.FIRESPIN: 'Fire Spin',
    Move.FISSURE: 'Fissure',
    Move.FLAMETHROWER: 'Flamethrower',
    Move.FLASH: 'Flash',
    Move.FLY: 'Fly',
    Move.FOCUSENERGY: 'Focus Energy',
    Move.FURYATTACK: 'Fury Attack',
    Move.FURYSWIPES: 'Fury Swipes',
    Move.GLARE: 'Glare',
    Move.GROWL: 'Growl',
    Move.GROWTH: 'Growth',
    Move.GUILLOTINE: 'Guillotine',
    Move.GUST: 'Gust',
    Move.HARDEN: 'Harden',
    Move.HAZE: 'Haze',
    Move.HEADBUTT: 'Headbutt',
    Move.HIJUMPKICK: 'Hi Jump Kick',
    Move.HORNATTACK: 'Horn Attack',
    Move.HORNDRILL: 'Horn Drill',
    Move.HYDROPUMP: 'Hydro Pump',
    Move.HYPERBEAM: 'Hyper Beam',
    Move.HYPERFANG: 'Hyper Fang',
    Move.HYPNOSIS: 'Hypnosis',
    Move.ICEBEAM: 'Ice Beam',
    Move.ICEPUNCH: 'Ice Punch',
    Move.JUMPKICK: 'Jump Kick',
    Move.KARATECHOP: 'Karate Chop',
    Move.KINESIS: 'Kinesis',
    Move.LEECHLIFE: 'Leech Life',
    Move.LEECHSEED: 'Leech Seed',
    Move.LEER: 'Leer',
    Move.LICK: 'Lick',
    Move.LIGHTSCREEN: 'Light Screen',
    Move.LOVELYKISS: 'Lovely Kiss',
    Move.LOWKICK: 'Low Kick',
    Move.MEDITATE: 'Meditate',
    Move.MEGADRAIN: 'Mega Drain',
    Move.MEGAKICK: 'Mega Kick',
    Move.MEGAPUNCH: 'Mega Punch',
    Move.METRONOME: 'Metronome',
    Move.MIMIC: 'Mimic',
    Move.MINIMIZE: 'Minimize',
    Move.MIRRORMOVE: 'Mirror Move',
    Move.MIST: 'Mist',
    Move.NIGHTSHADE: 'Night Shade',
    Move.PAYDAY: 'Pay Day',
    Move.PECK: 'Peck',
    Move.PETALDANCE: 'Petal Dance',
    Move.PINMISSILE: 'Pin Missile',
    Move.POISONGAS: 'Poison Gas',
    Move.POISONSTING: 'Poison Sting',
    Move.POISONPOWDER: 'Poisonpowder',
    Move.POUND: 'Pound',
    Move.PSYBEAM: 'Psybeam',
    Move.PSYCHIC: 'Psychic',
    Move.PSYWAVE: 'Psywave',
    Move.QUICKATTACK: 'Quick Attack',
    Move.RAGE: 'Rage',
    Move.RAZORLEAF: 'Razor Leaf',
    Move.RAZORWIND: 'Razor Wind',
    Move.RECOVER: 'Recover',
    Move.REFLECT: 'Reflect',
    Move.REST: 'Rest',
    Move.ROAR: 'Roar',
    Move.ROCKSLIDE: 'Rock Slide',
    Move.ROCKTHROW: 'Rock Throw',
    Move.ROLLINGKICK: 'Rolling Kick',
    Move.SANDATTACK: 'Sand-Attack',
    Move.SCRATCH: 'Scratch',
    Move.SCREECH: 'Screech',
    Move.SEISMICTOSS: 'Seismic Toss',
    Move.SELFDESTRUCT: 'Selfdestruct',
    Move.SHARPEN: 'Sharpen',
    Move.SING: 'Sing',
    Move.SKULLBASH: 'Skull Bash',
    Move.SKYATTACK: 'Sky Attack',
    Move.SLAM: 'Slam',
    Move.SLASH: 'Slash',
    Move.SLEEPPOWDER: 'Sleep Powder',
    Move.SLUDGE: 'Sludge',
    Move.SMOG: 'Smog',
    Move.SMOKESCREEN: 'Smokescreen',
    Move.SOFTBOILED: 'Softboiled',
    Move.SOLARBEAM: 'Solarbeam',
    Move.SONICBOOM: 'Sonicboom',
    Move.SPIKECANNON: 'Spike Cannon',
    Move.SPLASH: 'Splash',
    Move.SPORE: 'Spore',
    Move.STOMP: 'Stomp',
    Move.STRENGTH: 'Strength',
    Move.STRINGSHOT: 'String Shot',
    Move.STRUGGLE: 'Struggle',
    Move.STUNSPORE: 'Stun Spore',
    Move.SUBMISSION: 'Submission',
    Move.SUBSTITUTE: 'Substitute',
    Move.SUPERFANG: 'Super Fang',
    Move.SUPERSONIC: 'Supersonic',
    Move.SURF: 'Surf',
    Move.SWIFT: 'Swift',
    Move.SWORDSDANCE: 'Swords Dance',
    Move.TACKLE: 'Tackle',
    Move.TAILWHIP: 'Tail Whip',
    Move.TAKEDOWN: 'Take Down',
    Move.TELEPORT: 'Teleport',
    Move.THRASH: 'Thrash',
    Move.THUNDERWAVE: 'Thunder Wave',
    Move.THUNDER: 'Thunder',
    Move.THUNDERBOLT: 'Thunderbolt',
    Move.THUNDERPUNCH: 'Thunderpunch',
    Move.THUNDERSHOCK: 'Thundershock',
    Move.TOXIC: 'Toxic',
    Move.TRANSFORM: 'Transform',
    Move.TRIATTACK: 'Tri Attack',
    Move.TWINEEDLE: 'Twineedle',
    Move.VICEGRIP: 'Vicegrip',
    Move.VINEWHIP: 'Vine Whip',
    Move.WATERGUN: 'Water Gun',
    Move.WATERFALL: 'Waterfall',
    Move.WHIRLWIND: 'Whirlwind',
    Move.WINGATTACK: 'Wing Attack',
    Move.WITHDRAW: 'Withdraw',
    Move.WRAP: 'Wrap',
}
| 27.753247 | 56 | 0.51839 |
from enum import Enum
class Type(Enum):
NORMAL = 0
FIGHTING = 1
FLYING = 2
POISON = 3
GROUND = 4
ROCK = 5
BIRD = 6
BUG = 7
GHOST = 8
FIRE = 20
WATER = 21
GRASS = 22
ELECTRIC = 23
PSYCHIC = 24
ICE = 25
DRAGON = 26
def __str__(self):
MAPPING = {
self.BIRD: 'Bird',
self.BUG: 'Bug',
self.DRAGON: 'Dragon',
self.ELECTRIC: 'Electric',
self.FIGHTING: 'Fighting',
self.FIRE: 'Fire',
self.FLYING: 'Flying',
self.GHOST: 'Ghost',
self.GRASS: 'Grass',
self.GROUND: 'Ground',
self.ICE: 'Ice',
self.NORMAL: 'Normal',
self.POISON: 'Poison',
self.PSYCHIC: 'Psychic',
self.ROCK: 'Rock',
self.WATER: 'Water',
}
name = MAPPING.get(self, f'<Type {self.value}>')
return name
class Move(Enum):
NONE = 0
POUND = 1
KARATECHOP = 2
DOUBLESLAP = 3
COMETPUNCH = 4
MEGAPUNCH = 5
PAYDAY = 6
FIREPUNCH = 7
ICEPUNCH = 8
THUNDERPUNCH = 9
SCRATCH = 10
VICEGRIP = 11
GUILLOTINE = 12
RAZORWIND = 13
SWORDSDANCE = 14
CUT = 15
GUST = 16
WINGATTACK = 17
WHIRLWIND = 18
FLY = 19
BIND = 20
SLAM = 21
VINEWHIP = 22
STOMP = 23
DOUBLEKICK = 24
MEGAKICK = 25
JUMPKICK = 26
ROLLINGKICK = 27
SANDATTACK = 28
HEADBUTT = 29
HORNATTACK = 30
FURYATTACK = 31
HORNDRILL = 32
TACKLE = 33
BODYSLAM = 34
WRAP = 35
TAKEDOWN = 36
THRASH = 37
DOUBLEEDGE = 38
TAILWHIP = 39
POISONSTING = 40
TWINEEDLE = 41
PINMISSILE = 42
LEER = 43
BITE = 44
GROWL = 45
ROAR = 46
SING = 47
SUPERSONIC = 48
SONICBOOM = 49
DISABLE = 50
ACID = 51
EMBER = 52
FLAMETHROWER = 53
MIST = 54
WATERGUN = 55
HYDROPUMP = 56
SURF = 57
ICEBEAM = 58
BLIZZARD = 59
PSYBEAM = 60
BUBBLEBEAM = 61
AURORABEAM = 62
HYPERBEAM = 63
PECK = 64
DRILLPECK = 65
SUBMISSION = 66
LOWKICK = 67
COUNTER = 68
SEISMICTOSS = 69
STRENGTH = 70
ABSORB = 71
MEGADRAIN = 72
LEECHSEED = 73
GROWTH = 74
RAZORLEAF = 75
SOLARBEAM = 76
POISONPOWDER = 77
STUNSPORE = 78
SLEEPPOWDER = 79
PETALDANCE = 80
STRINGSHOT = 81
DRAGONRAGE = 82
FIRESPIN = 83
THUNDERSHOCK = 84
THUNDERBOLT = 85
THUNDERWAVE = 86
THUNDER = 87
ROCKTHROW = 88
EARTHQUAKE = 89
FISSURE = 90
DIG = 91
TOXIC = 92
CONFUSION = 93
PSYCHIC = 94
HYPNOSIS = 95
MEDITATE = 96
AGILITY = 97
QUICKATTACK = 98
RAGE = 99
TELEPORT = 100
NIGHTSHADE = 101
MIMIC = 102
SCREECH = 103
DOUBLETEAM = 104
RECOVER = 105
HARDEN = 106
MINIMIZE = 107
SMOKESCREEN = 108
CONFUSERAY = 109
WITHDRAW = 110
DEFENSECURL = 111
BARRIER = 112
LIGHTSCREEN = 113
HAZE = 114
REFLECT = 115
FOCUSENERGY = 116
BIDE = 117
METRONOME = 118
MIRRORMOVE = 119
SELFDESTRUCT = 120
EGGBOMB = 121
LICK = 122
SMOG = 123
SLUDGE = 124
BONECLUB = 125
FIREBLAST = 126
WATERFALL = 127
CLAMP = 128
SWIFT = 129
SKULLBASH = 130
SPIKECANNON = 131
CONSTRICT = 132
AMNESIA = 133
KINESIS = 134
SOFTBOILED = 135
HIJUMPKICK = 136
GLARE = 137
DREAMEATER = 138
POISONGAS = 139
BARRAGE = 140
LEECHLIFE = 141
LOVELYKISS = 142
SKYATTACK = 143
TRANSFORM = 144
BUBBLE = 145
DIZZYPUNCH = 146
SPORE = 147
FLASH = 148
PSYWAVE = 149
SPLASH = 150
ACIDARMOR = 151
CRABHAMMER = 152
EXPLOSION = 153
FURYSWIPES = 154
BONEMERANG = 155
REST = 156
ROCKSLIDE = 157
HYPERFANG = 158
SHARPEN = 159
CONVERSION = 160
TRIATTACK = 161
SUPERFANG = 162
SLASH = 163
SUBSTITUTE = 164
STRUGGLE = 165
    def __str__(self):
        """Return the human-readable move name, e.g. 'Rock Slide'.

        Values without an entry fall back to '<Move N>' where N is the
        raw ``self.value``.
        """
        # Move constant -> display name lookup table.
        # NOTE(review): this dict is rebuilt on every call; consider
        # caching if str() is hit in a hot path — confirm call frequency.
        MAPPING = {
            self.NONE: '-',
            self.ABSORB: 'Absorb',
            self.ACIDARMOR: 'Acid Armor',
            self.ACID: 'Acid',
            self.AGILITY: 'Agility',
            self.AMNESIA: 'Amnesia',
            self.AURORABEAM: 'Aurora Beam',
            self.BARRAGE: 'Barrage',
            self.BARRIER: 'Barrier',
            self.BIDE: 'Bide',
            self.BIND: 'Bind',
            self.BITE: 'Bite',
            self.BLIZZARD: 'Blizzard',
            self.BODYSLAM: 'Body Slam',
            self.BONECLUB: 'Bone Club',
            self.BONEMERANG: 'Bonemerang',
            self.BUBBLE: 'Bubble',
            self.BUBBLEBEAM: 'Bubblebeam',
            self.CLAMP: 'Clamp',
            self.COMETPUNCH: 'Comet Punch',
            self.CONFUSERAY: 'Confuse Ray',
            self.CONFUSION: 'Confusion',
            self.CONSTRICT: 'Constrict',
            self.CONVERSION: 'Conversion',
            self.COUNTER: 'Counter',
            self.CRABHAMMER: 'Crabhammer',
            self.CUT: 'Cut',
            self.DEFENSECURL: 'Defense Curl',
            self.DIG: 'Dig',
            self.DISABLE: 'Disable',
            self.DIZZYPUNCH: 'Dizzy Punch',
            self.DOUBLEKICK: 'Double Kick',
            self.DOUBLETEAM: 'Double Team',
            self.DOUBLEEDGE: 'Double-Edge',
            self.DOUBLESLAP: 'Doubleslap',
            self.DRAGONRAGE: 'Dragon Rage',
            self.DREAMEATER: 'Dream Eater',
            self.DRILLPECK: 'Drill Peck',
            self.EARTHQUAKE: 'Earthquake',
            self.EGGBOMB: 'Egg Bomb',
            self.EMBER: 'Ember',
            self.EXPLOSION: 'Explosion',
            self.FIREBLAST: 'Fire Blast',
            self.FIREPUNCH: 'Fire Punch',
            self.FIRESPIN: 'Fire Spin',
            self.FISSURE: 'Fissure',
            self.FLAMETHROWER: 'Flamethrower',
            self.FLASH: 'Flash',
            self.FLY: 'Fly',
            self.FOCUSENERGY: 'Focus Energy',
            self.FURYATTACK: 'Fury Attack',
            self.FURYSWIPES: 'Fury Swipes',
            self.GLARE: 'Glare',
            self.GROWL: 'Growl',
            self.GROWTH: 'Growth',
            self.GUILLOTINE: 'Guillotine',
            self.GUST: 'Gust',
            self.HARDEN: 'Harden',
            self.HAZE: 'Haze',
            self.HEADBUTT: 'Headbutt',
            self.HIJUMPKICK: 'Hi Jump Kick',
            self.HORNATTACK: 'Horn Attack',
            self.HORNDRILL: 'Horn Drill',
            self.HYDROPUMP: 'Hydro Pump',
            self.HYPERBEAM: 'Hyper Beam',
            self.HYPERFANG: 'Hyper Fang',
            self.HYPNOSIS: 'Hypnosis',
            self.ICEBEAM: 'Ice Beam',
            self.ICEPUNCH: 'Ice Punch',
            self.JUMPKICK: 'Jump Kick',
            self.KARATECHOP: 'Karate Chop',
            self.KINESIS: 'Kinesis',
            self.LEECHLIFE: 'Leech Life',
            self.LEECHSEED: 'Leech Seed',
            self.LEER: 'Leer',
            self.LICK: 'Lick',
            self.LIGHTSCREEN: 'Light Screen',
            self.LOVELYKISS: 'Lovely Kiss',
            self.LOWKICK: 'Low Kick',
            self.MEDITATE: 'Meditate',
            self.MEGADRAIN: 'Mega Drain',
            self.MEGAKICK: 'Mega Kick',
            self.MEGAPUNCH: 'Mega Punch',
            self.METRONOME: 'Metronome',
            self.MIMIC: 'Mimic',
            self.MINIMIZE: 'Minimize',
            self.MIRRORMOVE: 'Mirror Move',
            self.MIST: 'Mist',
            self.NIGHTSHADE: 'Night Shade',
            self.PAYDAY: 'Pay Day',
            self.PECK: 'Peck',
            self.PETALDANCE: 'Petal Dance',
            self.PINMISSILE: 'Pin Missile',
            self.POISONGAS: 'Poison Gas',
            self.POISONSTING: 'Poison Sting',
            self.POISONPOWDER: 'Poisonpowder',
            self.POUND: 'Pound',
            self.PSYBEAM: 'Psybeam',
            self.PSYCHIC: 'Psychic',
            self.PSYWAVE: 'Psywave',
            self.QUICKATTACK: 'Quick Attack',
            self.RAGE: 'Rage',
            self.RAZORLEAF: 'Razor Leaf',
            self.RAZORWIND: 'Razor Wind',
            self.RECOVER: 'Recover',
            self.REFLECT: 'Reflect',
            self.REST: 'Rest',
            self.ROAR: 'Roar',
            self.ROCKSLIDE: 'Rock Slide',
            self.ROCKTHROW: 'Rock Throw',
            self.ROLLINGKICK: 'Rolling Kick',
            self.SANDATTACK: 'Sand-Attack',
            self.SCRATCH: 'Scratch',
            self.SCREECH: 'Screech',
            self.SEISMICTOSS: 'Seismic Toss',
            self.SELFDESTRUCT: 'Selfdestruct',
            self.SHARPEN: 'Sharpen',
            self.SING: 'Sing',
            self.SKULLBASH: 'Skull Bash',
            self.SKYATTACK: 'Sky Attack',
            self.SLAM: 'Slam',
            self.SLASH: 'Slash',
            self.SLEEPPOWDER: 'Sleep Powder',
            self.SLUDGE: 'Sludge',
            self.SMOG: 'Smog',
            self.SMOKESCREEN: 'Smokescreen',
            self.SOFTBOILED: 'Softboiled',
            self.SOLARBEAM: 'Solarbeam',
            self.SONICBOOM: 'Sonicboom',
            self.SPIKECANNON: 'Spike Cannon',
            self.SPLASH: 'Splash',
            self.SPORE: 'Spore',
            self.STOMP: 'Stomp',
            self.STRENGTH: 'Strength',
            self.STRINGSHOT: 'String Shot',
            self.STRUGGLE: 'Struggle',
            self.STUNSPORE: 'Stun Spore',
            self.SUBMISSION: 'Submission',
            self.SUBSTITUTE: 'Substitute',
            self.SUPERFANG: 'Super Fang',
            self.SUPERSONIC: 'Supersonic',
            self.SURF: 'Surf',
            self.SWIFT: 'Swift',
            self.SWORDSDANCE: 'Swords Dance',
            self.TACKLE: 'Tackle',
            self.TAILWHIP: 'Tail Whip',
            self.TAKEDOWN: 'Take Down',
            self.TELEPORT: 'Teleport',
            self.THRASH: 'Thrash',
            self.THUNDERWAVE: 'Thunder Wave',
            self.THUNDER: 'Thunder',
            self.THUNDERBOLT: 'Thunderbolt',
            self.THUNDERPUNCH: 'Thunderpunch',
            self.THUNDERSHOCK: 'Thundershock',
            self.TOXIC: 'Toxic',
            self.TRANSFORM: 'Transform',
            self.TRIATTACK: 'Tri Attack',
            self.TWINEEDLE: 'Twineedle',
            self.VICEGRIP: 'Vicegrip',
            self.VINEWHIP: 'Vine Whip',
            self.WATERGUN: 'Water Gun',
            self.WATERFALL: 'Waterfall',
            self.WHIRLWIND: 'Whirlwind',
            self.WINGATTACK: 'Wing Attack',
            self.WITHDRAW: 'Withdraw',
            self.WRAP: 'Wrap',
        }
        name = MAPPING.get(self, f'<Move {self.value}>')
        return name
| true | true |
f7310cbb1ac1ab0317dcf30418fa49e6095d48e7 | 2,760 | py | Python | maps/ors.py | sackh/maps-cli | 64cc1877518c88bc9b885ebc22580b595bee6fcc | [
"MIT"
] | 5 | 2021-01-21T08:19:43.000Z | 2021-12-12T06:20:53.000Z | maps/ors.py | sackh/maps-cli | 64cc1877518c88bc9b885ebc22580b595bee6fcc | [
"MIT"
] | null | null | null | maps/ors.py | sackh/maps-cli | 64cc1877518c88bc9b885ebc22580b595bee6fcc | [
"MIT"
] | null | null | null | """This module defines all the ORS(https://openrouteservice.org/services/) commands."""
import os
import click
import openrouteservice as opnrs
import simplejson as json
from geojsonio import display as geo_display
from maps.exceptions import ApiKeyNotFoundError
from maps.utils import yield_subcommands
@click.group()
@click.pass_context
def ors(ctx):
    """ORS (https://openrouteservice.org/) provider.

    Root Click command group; subcommands share state via ``ctx.obj``.
    """
    # Initialise an empty dict so subcommands can stash shared values
    # (e.g. the API key) on the Click context object.
    ctx.obj = {}
@ors.command()
def show():
    """show list of all sub commands."""
    # yield_subcommands enumerates the subcommand names of the `ors` group.
    for sub in yield_subcommands(ors):
        click.secho(sub, fg="green")
@ors.command(short_help="forward or reverse geocode for an address or coordinates.")
@click.argument("query", required=True)
@click.option("--apikey", help="Your ORS API key", type=str)
@click.option(
    "--forward/--reverse",
    default=True,
    show_default=True,
    help="Perform a forward or reverse geocode",
)
@click.option("--raw", is_flag=True)
@click.option("--display", help="Display result in browser", is_flag=True)
@click.pass_context
def geocoding(ctx, query, apikey, forward, raw, display):
    """
    Open Route Service geocoding service.
    \f
    :param ctx: A context dictionary.
    :param query: A string to represent address query for geocoding.
    :param apikey: An API key for authentication.
    :param forward: A boolean flag for forward/reverse geocoding.
    :param raw: A boolean flag to show api response as it is.
    :param display: A boolean flag to show result in web browser.
    :return: None.
    """
    # The --apikey flag wins; otherwise fall back to the ORS_APIKEY env var.
    apikey = apikey or os.environ.get("ORS_APIKEY")
    if apikey is None:
        raise ApiKeyNotFoundError(
            "Please pass Open Route Service API KEY as --apikey or set it as environment "
            "variable in ORS_APIKEY "
        )
    ctx.obj["apikey"] = apikey
    geolocator = opnrs.Client(key=ctx.obj["apikey"])
    if forward:
        # Forward geocoding: free-text address -> coordinates.
        geocode = geolocator.pelias_search(text=query)
        if raw:
            click.secho(json.dumps(geocode, indent=2), fg="green")
        elif display:
            # Drop the top-level "geocoding" metadata block before handing
            # the remaining GeoJSON to geojson.io for display.
            geocode.pop("geocoding")
            geo_display(json.dumps(geocode))
        else:
            # Feature coordinates are indexed [lon, lat] (index 0 = lon).
            for feature in geocode["features"]:
                coords = feature["geometry"]["coordinates"]
                result = {"lat": coords[1], "lon": coords[0]}
                click.secho(json.dumps(result, indent=2), fg="green")
    else:
        # Reverse geocoding: comma-separated coordinate string -> labels.
        # NOTE(review): assumes the "lon,lat" order pelias_reverse expects —
        # confirm against the openrouteservice client docs.
        coordinate = query.split(",")
        reverse = geolocator.pelias_reverse(point=coordinate, validate=False)
        if raw:
            for result in reverse["features"]:
                click.secho(json.dumps(result, indent=2), fg="green")
        else:
            for result in reverse["features"]:
                click.secho(result["properties"]["label"], fg="green")
| 34.074074 | 90 | 0.648913 | import os
import click
import openrouteservice as opnrs
import simplejson as json
from geojsonio import display as geo_display
from maps.exceptions import ApiKeyNotFoundError
from maps.utils import yield_subcommands
@click.group()
@click.pass_context
def ors(ctx):
ctx.obj = {}
@ors.command()
def show():
for sub in yield_subcommands(ors):
click.secho(sub, fg="green")
@ors.command(short_help="forward or reverse geocode for an address or coordinates.")
@click.argument("query", required=True)
@click.option("--apikey", help="Your ORS API key", type=str)
@click.option(
"--forward/--reverse",
default=True,
show_default=True,
help="Perform a forward or reverse geocode",
)
@click.option("--raw", is_flag=True)
@click.option("--display", help="Display result in browser", is_flag=True)
@click.pass_context
def geocoding(ctx, query, apikey, forward, raw, display):
apikey = apikey or os.environ.get("ORS_APIKEY")
if apikey is None:
raise ApiKeyNotFoundError(
"Please pass Open Route Service API KEY as --apikey or set it as environment "
"variable in ORS_APIKEY "
)
ctx.obj["apikey"] = apikey
geolocator = opnrs.Client(key=ctx.obj["apikey"])
if forward:
geocode = geolocator.pelias_search(text=query)
if raw:
click.secho(json.dumps(geocode, indent=2), fg="green")
elif display:
geocode.pop("geocoding")
geo_display(json.dumps(geocode))
else:
for feature in geocode["features"]:
coords = feature["geometry"]["coordinates"]
result = {"lat": coords[1], "lon": coords[0]}
click.secho(json.dumps(result, indent=2), fg="green")
else:
coordinate = query.split(",")
reverse = geolocator.pelias_reverse(point=coordinate, validate=False)
if raw:
for result in reverse["features"]:
click.secho(json.dumps(result, indent=2), fg="green")
else:
for result in reverse["features"]:
click.secho(result["properties"]["label"], fg="green")
| true | true |
f7310d1859fb85e6d93daf01566a93a660d350f0 | 56 | py | Python | virtual/lib/python3.6/site-packages/imagekit/forms/__init__.py | kenmutuma001/galleria | 1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72 | [
"Unlicense"
] | 2 | 2019-04-15T10:28:42.000Z | 2019-04-26T21:48:17.000Z | virtual/lib/python3.6/site-packages/imagekit/forms/__init__.py | kenmutuma001/galleria | 1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72 | [
"Unlicense"
] | 12 | 2020-02-12T00:25:14.000Z | 2022-03-11T23:48:53.000Z | virtual/lib/python3.6/site-packages/imagekit/forms/__init__.py | kenmutuma001/galleria | 1bbb9fbd3ca8bf7a030dbcbcbd1674d392055d72 | [
"Unlicense"
] | 1 | 2021-05-24T10:19:13.000Z | 2021-05-24T10:19:13.000Z | # flake8: noqa
from .fields import ProcessedImageField
| 14 | 39 | 0.803571 |
from .fields import ProcessedImageField
| true | true |
f7310fabca6b6ef898997efb6b048ead96681b15 | 4,598 | py | Python | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 29 | 2018-09-19T01:16:27.000Z | 2022-03-29T14:35:36.000Z | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 14 | 2019-04-12T18:37:36.000Z | 2022-02-10T00:27:55.000Z | plastering/evaluator.py | MingzheWu418/plastering | 322531e934c3acf2ecc8f520b37a6d255b9959c2 | [
"MIT"
] | 14 | 2019-03-05T23:44:11.000Z | 2022-03-18T07:29:31.000Z | from copy import deepcopy
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pdb
def binarize_labels(true_labels, pred_labels):
    """Binarize two srcid -> label-collection mappings onto one label space.

    A ``MultiLabelBinarizer`` is fitted on the union of all labels seen in
    either mapping so both output matrices share the same column order.

    Args:
        true_labels: dict mapping srcid -> iterable of ground-truth labels.
        pred_labels: dict mapping srcid -> iterable of predicted labels.

    Returns:
        Tuple ``(true_mat, pred_mat)`` of binary indicator matrices.

    NOTE(review): rows follow each dict's own iteration order — assumes both
    dicts iterate the srcids in the same order; confirm at the call sites.
    """
    # Fix: removed an unused `srcids` local that was computed and never read.
    # Fit on every label set from both sides so the binarizer knows the full
    # label vocabulary (columns) before transforming either matrix.
    tot_labels = [list(labels) for labels in
                  list(pred_labels.values()) + list(true_labels.values())]
    mlb = MultiLabelBinarizer().fit(tot_labels)
    pred_mat = mlb.transform(pred_labels.values())
    true_mat = mlb.transform(true_labels.values())
    return true_mat, pred_mat
def get_micro_f1(true_labels, pred_labels):
    """Micro-averaged F1 between two srcid -> label-collection mappings."""
    mats = binarize_labels(true_labels, pred_labels)
    return get_micro_f1_mat(*mats)
def get_macro_f1(true_labels, pred_labels):
    """Macro-averaged F1 between two srcid -> label-collection mappings."""
    mats = binarize_labels(true_labels, pred_labels)
    return get_macro_f1_mat(*mats)
def get_macro_f1_mat(true_mat, pred_mat):
    """Macro-averaged F1 over label columns of binary indicator matrices.

    Only columns with at least one positive ground-truth entry contribute,
    so labels absent from the ground truth do not drag the mean down.

    Args:
        true_mat: binary ground-truth indicator matrix (samples x labels).
        pred_mat: binary prediction indicator matrix, same shape.

    Returns:
        Mean per-label F1 as a float; 0.0 when no label occurs in the
        ground truth (previously ``np.mean([])`` produced NaN + a warning).
    """
    assert true_mat.shape == pred_mat.shape
    f1s = []
    for i in range(true_mat.shape[1]):
        if 1 not in true_mat[:, i]:
            continue  # label never occurs in the ground truth; skip it
        f1s.append(f1_score(true_mat[:, i], pred_mat[:, i]))
    if not f1s:
        return 0.0
    return np.mean(f1s)
def get_multiclass_micro_f1(true_labels, pred_labels):
    """Micro-averaged F1 for single-label (multiclass) predictions.

    Args:
        true_labels: dict srcid -> ground-truth label.
        pred_labels: dict srcid -> predicted label; must cover the same
            srcids as ``true_labels``.

    Returns:
        Micro-averaged F1 score as a float.
    """
    # Fix: dropped a dead duplicate `LabelEncoder()` instantiation and
    # leftover commented-out debug code (binarize path, pdb.set_trace).
    srcids = list(true_labels.keys())
    true_label_list = [true_labels[srcid] for srcid in srcids]
    pred_label_list = [pred_labels[srcid] for srcid in srcids]
    # Fit one encoder on both sides so identical labels share codes.
    le = LabelEncoder()
    le.fit(true_label_list + pred_label_list)
    true_encoded = le.transform(true_label_list)
    pred_encoded = le.transform(pred_label_list)
    return f1_score(true_encoded, pred_encoded, average='micro')
def get_multiclass_macro_f1(true_labels, pred_labels):
    """Macro-averaged F1 for single-label (multiclass) predictions.

    Args:
        true_labels: dict srcid -> ground-truth label.
        pred_labels: dict srcid -> predicted label; must cover the same
            srcids as ``true_labels``.

    Returns:
        Macro-averaged F1 score as a float.
    """
    # Fix: dropped a dead duplicate `LabelEncoder()` instantiation,
    # leftover commented-out debug code, and the misleading `f1_micro`
    # local name (this function computes the macro average).
    srcids = list(true_labels.keys())
    true_label_list = [true_labels[srcid] for srcid in srcids]
    pred_label_list = [pred_labels[srcid] for srcid in srcids]
    # Fit one encoder on both sides so identical labels share codes.
    le = LabelEncoder()
    le.fit(true_label_list + pred_label_list)
    true_encoded = le.transform(true_label_list)
    pred_encoded = le.transform(pred_label_list)
    return f1_score(true_encoded, pred_encoded, average='macro')
def get_micro_f1_mat(true_mat, pred_mat):
    """Micro-averaged F1 from binary indicator matrices.

    Pools true positives, false positives and false negatives over every
    cell of the matrices, then computes F1 from pooled precision/recall.

    Args:
        true_mat: binary (0/1) ground-truth indicator array.
        pred_mat: binary (0/1) prediction indicator array, same shape.

    Returns:
        Micro-averaged F1 as a float; 0.0 when F1 is undefined (TP == 0),
        following the usual ``zero_division=0`` convention instead of the
        previous NaN-with-RuntimeWarning behaviour.  Also removed the
        unused ``TN`` computation.
    """
    TP = np.sum(np.bitwise_and(true_mat == 1, pred_mat == 1))
    FN = np.sum(np.bitwise_and(true_mat == 1, pred_mat == 0))
    FP = np.sum(np.bitwise_and(true_mat == 0, pred_mat == 1))
    if TP == 0:
        # Precision and recall are both 0 (or undefined); F1 is 0 either way.
        return 0.0
    micro_prec = TP / (TP + FP)
    micro_rec = TP / (TP + FN)
    return 2 * micro_prec * micro_rec / (micro_prec + micro_rec)
def get_point_accuracy(true_tagsets, pred_tagsets):
    """Fraction of predicted srcids whose label matches the ground truth.

    The comparison is case-insensitive and the accuracy is taken over the
    srcids present in ``pred_tagsets``.
    """
    hits = 0
    for srcid, predicted in pred_tagsets.items():
        if predicted.lower() == true_tagsets[srcid].lower():
            hits += 1
    return hits / len(pred_tagsets)
def get_accuracy(true_tagsets_sets, pred_tagsets_sets):
    """Mean per-srcid Jaccard similarity between predicted and true tagsets.

    Args:
        true_tagsets_sets: dict srcid -> iterable of ground-truth tagsets.
        pred_tagsets_sets: dict srcid -> iterable of predicted tagsets.

    Returns:
        Average Jaccard index over the srcids of ``pred_tagsets_sets``.
        A srcid where both sides are empty counts as a perfect match
        (Jaccard 1) instead of raising ZeroDivisionError; an empty
        ``pred_tagsets_sets`` still raises ZeroDivisionError.
    """
    acc = 0
    for srcid, pred_tagsets in pred_tagsets_sets.items():
        pred = set(pred_tagsets)
        true = set(true_tagsets_sets[srcid])
        union = pred | true
        if not union:
            # Both empty: exact agreement (mirrors the empty-truth handling
            # in get_accuracy_conservative).
            jaccard = 1
        else:
            jaccard = len(pred & true) / len(union)
        acc += jaccard
    return acc / len(pred_tagsets_sets)
def exclude_common_tagsets(tagsets):
    """Filter out ubiquitous tagsets that carry no discriminative signal.

    A tagset whose leading '-'-delimited token is 'networkadapter' or
    'building' is dropped; everything else is kept in order.
    """
    return [tagset for tagset in tagsets
            if tagset.split('-')[0] not in ('networkadapter', 'building')]
def get_accuracy_conservative(true_tagsets_sets, pred_tagsets_sets):
    """Mean per-srcid Jaccard similarity, ignoring ubiquitous tagsets.

    Same as ``get_accuracy`` except that ``networkadapter-*`` and
    ``building-*`` tagsets are excluded from both sides first, and a
    srcid whose filtered ground truth is empty counts as a perfect
    match (score 1).
    """
    def _filtered(tagsets):
        # Inlined equivalent of exclude_common_tagsets, as a set.
        return {t for t in tagsets
                if t.split('-')[0] not in ('networkadapter', 'building')}

    total = 0
    for srcid, pred_tagsets in pred_tagsets_sets.items():
        pred = _filtered(pred_tagsets)
        true = _filtered(true_tagsets_sets[srcid])
        if true:
            score = len(pred & true) / len(pred | true)
        else:
            score = 1
        total += score
    return total / len(pred_tagsets_sets)
def get_set_accuracy(true_label_sets, pred_tagset_sets):
    """Stub for a per-srcid set-accuracy metric (not implemented).

    Intended semantics per the notes below: per-sample Jaccard
    (#intersection / #union) averaged over all srcids.  Currently a
    no-op that implicitly returns None.
    """
    # Accuracy per sample = #intersection / #union
    # Accuracy over set = average of the accuracy per sample
    # Input params dictionary based on the srcids
    for srcid, pred_tagset_set in pred_tagset_sets.items():
        pass #TODO
| 38.966102 | 75 | 0.707916 | from copy import deepcopy
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pdb
def binarize_labels(true_labels, pred_labels):
srcids = list(pred_labels.keys())
tot_labels = [list(labels) for labels in
list(pred_labels.values()) + list(true_labels.values())]
mlb = MultiLabelBinarizer().fit(tot_labels)
pred_mat = mlb.transform(pred_labels.values())
true_mat = mlb.transform(true_labels.values())
return true_mat, pred_mat
def get_micro_f1(true_labels, pred_labels):
true_mat, pred_mat = binarize_labels(true_labels, pred_labels)
return get_micro_f1_mat(true_mat, pred_mat)
def get_macro_f1(true_labels, pred_labels):
true_mat, pred_mat = binarize_labels(true_labels, pred_labels)
return get_macro_f1_mat(true_mat, pred_mat)
def get_macro_f1_mat(true_mat, pred_mat):
assert true_mat.shape == pred_mat.shape
f1s = []
for i in range(0, true_mat.shape[1]):
if 1 not in true_mat[:,i]:
continue
f1 = f1_score(true_mat[:,i], pred_mat[:,i])
f1s.append(f1)
return np.mean(f1s)
def get_multiclass_micro_f1(true_labels, pred_labels):
le = LabelEncoder()
srcids = list(true_labels.keys())
true_label_list = [true_labels[srcid] for srcid in srcids]
pred_label_list = [pred_labels[srcid] for srcid in srcids]
le = LabelEncoder()
le.fit(true_label_list + pred_label_list)
true_encoded = le.transform(true_label_list)
pred_encoded = le.transform(pred_label_list)
f1_micro = f1_score(true_encoded, pred_encoded, average='micro')
return f1_micro
def get_multiclass_macro_f1(true_labels, pred_labels):
le = LabelEncoder()
srcids = list(true_labels.keys())
true_label_list = [true_labels[srcid] for srcid in srcids]
pred_label_list = [pred_labels[srcid] for srcid in srcids]
le = LabelEncoder()
le.fit(true_label_list + pred_label_list)
true_encoded = le.transform(true_label_list)
pred_encoded = le.transform(pred_label_list)
f1_micro = f1_score(true_encoded, pred_encoded, average='macro')
return f1_micro
def get_micro_f1_mat(true_mat, pred_mat):
TP = np.sum(np.bitwise_and(true_mat==1, pred_mat==1))
TN = np.sum(np.bitwise_and(true_mat==0, pred_mat==0))
FN = np.sum(np.bitwise_and(true_mat==1, pred_mat==0))
FP = np.sum(np.bitwise_and(true_mat==0, pred_mat==1))
micro_prec = TP / (TP + FP)
micro_rec = TP / (TP + FN)
return 2 * micro_prec * micro_rec / (micro_prec + micro_rec)
def get_point_accuracy(true_tagsets, pred_tagsets):
target_srcids = pred_tagsets.keys()
return sum([true_tagsets[srcid].lower() == pred_tagsets[srcid].lower()
for srcid in target_srcids]) / len(target_srcids)
def get_accuracy(true_tagsets_sets, pred_tagsets_sets):
acc = 0
for srcid, pred_tagsets in pred_tagsets_sets.items():
pred = set(pred_tagsets)
true = set(true_tagsets_sets[srcid])
jaccard = len(pred.intersection(true)) / len(pred.union(true))
acc += jaccard
return acc / len(pred_tagsets_sets)
def exclude_common_tagsets(tagsets):
return [tagset for tagset in tagsets
if tagset.split('-')[0] != 'networkadapter' and
tagset.split('-')[0] != 'building'
]
def get_accuracy_conservative(true_tagsets_sets, pred_tagsets_sets):
acc = 0
for srcid, pred_tagsets in pred_tagsets_sets.items():
pred = set(exclude_common_tagsets(pred_tagsets))
true = set(exclude_common_tagsets(true_tagsets_sets[srcid]))
if len(true) == 0:
jaccard = 1
else:
jaccard = len(pred.intersection(true)) / len(pred.union(true))
acc += jaccard
return acc / len(pred_tagsets_sets)
def get_set_accuracy(true_label_sets, pred_tagset_sets):
ed_tagset_set in pred_tagset_sets.items():
pass
| true | true |
f7310fbb1474ae83999ab53c3f7d66fcd8c2abb3 | 136,240 | py | Python | Lib/test/test_codecs.py | nkhandare/python31all- | c6c792f2db5938def0261378acb5cf1de440ff43 | [
"bzip2-1.0.6"
] | 33 | 2021-07-25T14:23:35.000Z | 2022-03-31T00:17:30.000Z | Lib/test/test_codecs.py | nkhandare/python31all- | c6c792f2db5938def0261378acb5cf1de440ff43 | [
"bzip2-1.0.6"
] | 32 | 2019-04-26T12:29:36.000Z | 2022-03-08T14:24:30.000Z | Lib/test/test_codecs.py | val-verde/cpython | 17aa701d799d5e071d83205d877f722f1498a09f | [
"0BSD"
] | 3 | 2019-11-12T15:21:58.000Z | 2020-09-04T14:27:55.000Z | import codecs
import contextlib
import io
import locale
import sys
import unittest
import encodings
from unittest import mock
from test import support
from test.support import os_helper
from test.support import warnings_helper
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import ctypes
except ImportError:
ctypes = None
SIZEOF_WCHAR_T = -1
else:
SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
# On small versions of Windows like Windows IoT or Windows Nano Server not all codepages are present
def is_code_page_present(cp):
from ctypes import POINTER, WINFUNCTYPE, WinDLL
from ctypes.wintypes import BOOL, UINT, BYTE, WCHAR, UINT, DWORD
MAX_LEADBYTES = 12 # 5 ranges, 2 bytes ea., 0 term.
MAX_DEFAULTCHAR = 2 # single or double byte
MAX_PATH = 260
class CPINFOEXW(ctypes.Structure):
_fields_ = [("MaxCharSize", UINT),
("DefaultChar", BYTE*MAX_DEFAULTCHAR),
("LeadByte", BYTE*MAX_LEADBYTES),
("UnicodeDefaultChar", WCHAR),
("CodePage", UINT),
("CodePageName", WCHAR*MAX_PATH)]
prototype = WINFUNCTYPE(BOOL, UINT, DWORD, POINTER(CPINFOEXW))
GetCPInfoEx = prototype(("GetCPInfoExW", WinDLL("kernel32")))
info = CPINFOEXW()
return GetCPInfoEx(cp, 0, info)
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self, buffer):
self._buffer = buffer
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = self._buffer[:0] # make empty
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
class MixInCheckStateHandling:
def check_state_handling_decode(self, encoding, u, s):
for i in range(len(s)+1):
d = codecs.getincrementaldecoder(encoding)()
part1 = d.decode(s[:i])
state = d.getstate()
self.assertIsInstance(state[1], int)
# Check that the condition stated in the documentation for
# IncrementalDecoder.getstate() holds
if not state[1]:
# reset decoder to the default state without anything buffered
d.setstate((state[0][:0], 0))
# Feeding the previous input may not produce any output
self.assertTrue(not d.decode(state[0]))
# The decoder must return to the same state
self.assertEqual(state, d.getstate())
# Create a new decoder and set it to the state
# we extracted from the old one
d = codecs.getincrementaldecoder(encoding)()
d.setstate(state)
part2 = d.decode(s[i:], True)
self.assertEqual(u, part1+part2)
def check_state_handling_encode(self, encoding, u, s):
for i in range(len(u)+1):
d = codecs.getincrementalencoder(encoding)()
part1 = d.encode(u[:i])
state = d.getstate()
d = codecs.getincrementalencoder(encoding)()
d.setstate(state)
part2 = d.encode(u[i:], True)
self.assertEqual(s, part1+part2)
class ReadTest(MixInCheckStateHandling):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
# do the check again, this time using an incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# Check whether the reset method works properly
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
lineends = ("\n", "\r\n", "\r", "\u2028")
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(lineends):
vw.append((i*200+200)*"\u3042" + lineend)
vwo.append((i*200+200)*"\u3042")
self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in lineends:
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
self.assertEqual(
reader.readline(keepends=True),
"xxx\n",
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
self.assertEqual(
reader.readline(keepends=False),
"xxx",
)
def test_mixed_readline_and_read(self):
lines = ["Humpty Dumpty sat on a wall,\n",
"Humpty Dumpty had a great fall.\r\n",
"All the king's horses and all the king's men\r",
"Couldn't put Humpty together again."]
data = ''.join(lines)
def getreader():
stream = io.BytesIO(data.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
# Issue #8260: Test readline() followed by read()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
# Issue #32110: Test readline() followed by read(n)
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.read(1), lines[1][0])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[len(lines[0]) + 1:][:100])
# Issue #16636: Test readline() followed by readlines()
f = getreader()
self.assertEqual(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
# Test read(n) followed by read()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
# Issue #32110: Test read(n) followed by read(n)
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(1), data[5])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[6:106])
# Issue #12446: Test read(n) followed by readlines()
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = io.BytesIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue(b"")
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=False), "foo")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=False), "")
self.assertEqual(reader.readline(keepends=False), "bar")
writer.write("baz")
self.assertEqual(reader.readline(keepends=False), "baz")
self.assertEqual(reader.readline(keepends=False), "")
# Lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=True), "foo\r")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=True), "\n")
self.assertEqual(reader.readline(keepends=True), "bar\r")
writer.write("baz")
self.assertEqual(reader.readline(keepends=True), "baz")
self.assertEqual(reader.readline(keepends=True), "")
writer.write("foo\r\n")
self.assertEqual(reader.readline(keepends=True), "foo\r\n")
def test_bug1098990_a(self):
s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = "next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), "")
def test_bug1098990_b(self):
s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = "stillokay:bbbbxx\r\n"
s4 = "broken!!!!badbad\r\n"
s5 = "againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), "")
ill_formed_sequence_replace = "\ufffd"
def test_lone_surrogates(self):
self.assertRaises(UnicodeEncodeError, "\ud800".encode, self.encoding)
self.assertEqual("[\uDC80]".encode(self.encoding, "backslashreplace"),
"[\\udc80]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "namereplace"),
"[\\udc80]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "xmlcharrefreplace"),
"[�]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "ignore"),
"[]".encode(self.encoding))
self.assertEqual("[\uDC80]".encode(self.encoding, "replace"),
"[?]".encode(self.encoding))
# sequential surrogate characters
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "ignore"),
"[]".encode(self.encoding))
self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "replace"),
"[??]".encode(self.encoding))
bom = "".encode(self.encoding)
for before, after in [("\U00010fff", "A"), ("[", "]"),
("A", "\U00010fff")]:
before_sequence = before.encode(self.encoding)[len(bom):]
after_sequence = after.encode(self.encoding)[len(bom):]
test_string = before + "\uDC80" + after
test_sequence = (bom + before_sequence +
self.ill_formed_sequence + after_sequence)
self.assertRaises(UnicodeDecodeError, test_sequence.decode,
self.encoding)
self.assertEqual(test_string.encode(self.encoding,
"surrogatepass"),
test_sequence)
self.assertEqual(test_sequence.decode(self.encoding,
"surrogatepass"),
test_string)
self.assertEqual(test_sequence.decode(self.encoding, "ignore"),
before + after)
self.assertEqual(test_sequence.decode(self.encoding, "replace"),
before + self.ill_formed_sequence_replace + after)
backslashreplace = ''.join('\\x%02x' % b
for b in self.ill_formed_sequence)
self.assertEqual(test_sequence.decode(self.encoding, "backslashreplace"),
before + backslashreplace + after)
    def test_incremental_surrogatepass(self):
        # Test incremental decoder for surrogatepass handler:
        # see issue #24214
        # High surrogate
        data = '\uD901'.encode(self.encoding, 'surrogatepass')
        for i in range(1, len(data)):
            dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
            # No output until the sequence is complete...
            self.assertEqual(dec.decode(data[:i]), '')
            # ...then the lone surrogate is produced by the final chunk.
            self.assertEqual(dec.decode(data[i:], True), '\uD901')
        # Low surrogate
        data = '\uDC02'.encode(self.encoding, 'surrogatepass')
        for i in range(1, len(data)):
            dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
            self.assertEqual(dec.decode(data[:i]), '')
            self.assertEqual(dec.decode(data[i:]), '\uDC02')
class UTF32Test(ReadTest, unittest.TestCase):
    """Tests for the native-endian "utf-32" codec (writes/consumes a BOM)."""
    encoding = "utf-32"
    # Encoded lone low surrogate (U+DC80) in host byte order; strict
    # decoding of it must fail.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc\x00\x00"
    else:
        ill_formed_sequence = b"\x00\x00\xdc\x80"
    # "spamspam" encoded with a single little-/big-endian BOM prefix.
    spamle = (b'\xff\xfe\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = (b'\x00\x00\xfe\xff'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
    def test_only_one_bom(self):
        # The stream writer must emit the BOM once, before the first chunk.
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")
    def test_badbom(self):
        # Input starting with bytes that are not a valid BOM must raise.
        s = io.BytesIO(4*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = io.BytesIO(8*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
    def test_partial(self):
        # Feed the incremental decoder one byte at a time; entry i of the
        # list is the text expected after i bytes of input.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read
                "", # third byte of BOM read
                "", # fourth byte of BOM read => byteorder known
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_handlers(self):
        # An odd trailing byte is consumed/replaced per the error handler.
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_32_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_32_decode(b'\x01', 'ignore', True))
    def test_errors(self):
        # A truncated code unit is a strict decoding error.
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          b"\xff", "strict", True)
    def test_decoder_state(self):
        # getstate()/setstate() must preserve decoding across both orders.
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest, unittest.TestCase):
    """Tests for the explicit little-endian UTF-32 codec (no BOM)."""
    encoding = "utf-32-le"
    # Encoded lone low surrogate (U+DC80); strict decoding must fail.
    ill_formed_sequence = b"\x80\xdc\x00\x00"
    def test_partial(self):
        # Byte-at-a-time feeding; entry i is the text decoded after i bytes.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_simple(self):
        # Least-significant byte first, no BOM.
        self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")
    def test_errors(self):
        # A truncated code unit is a strict decoding error.
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          b"\xff", "strict", True)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest, unittest.TestCase):
    """Tests for the explicit big-endian UTF-32 codec (no BOM)."""
    encoding = "utf-32-be"
    # Encoded lone low surrogate (U+DC80); strict decoding must fail.
    ill_formed_sequence = b"\x00\x00\xdc\x80"
    def test_partial(self):
        # Byte-at-a-time feeding; entry i is the text decoded after i bytes.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_simple(self):
        # Most-significant byte first, no BOM.
        self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")
    def test_errors(self):
        # A truncated code unit is a strict decoding error.
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          b"\xff", "strict", True)
    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest, unittest.TestCase):
    """Tests for the native-endian "utf-16" codec (writes/consumes a BOM)."""
    encoding = "utf-16"
    # Encoded lone low surrogate (U+DC80) in host byte order; strict
    # decoding of it must fail.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc"
    else:
        ill_formed_sequence = b"\xdc\x80"
    # "spamspam" encoded with a single little-/big-endian BOM prefix.
    spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
    def test_only_one_bom(self):
        # The stream writer must emit the BOM once, before the first chunk.
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")
    def test_badbom(self):
        # Input starting with bytes that are not a valid BOM must raise.
        s = io.BytesIO(b"\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = io.BytesIO(b"\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
    def test_partial(self):
        # Feed the incremental decoder one byte at a time; entry i of the
        # list is the text expected after i bytes of input.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read => byteorder known
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_handlers(self):
        # An odd trailing byte is consumed/replaced per the error handler.
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_16_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_16_decode(b'\x01', 'ignore', True))
    def test_errors(self):
        # A truncated code unit is a strict decoding error.
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
                          b"\xff", "strict", True)
    def test_decoder_state(self):
        # getstate()/setstate() must preserve decoding across both orders.
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)
    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = 'Hello\r\nworld\r\n'
        s = s1.encode(self.encoding)
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, 'wb') as fp:
            fp.write(s)
        # codecs.open()'s 'U' mode is deprecated; the warning is expected.
        with warnings_helper.check_warnings(('', DeprecationWarning)):
            reader = codecs.open(os_helper.TESTFN, 'U', encoding=self.encoding)
        with reader:
            self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest, unittest.TestCase):
    """Tests for the explicit little-endian UTF-16 codec (no BOM)."""
    encoding = "utf-16-le"
    # Encoded lone low surrogate (U+DC80); strict decoding must fail.
    ill_formed_sequence = b"\x80\xdc"
    def test_partial(self):
        # Byte-at-a-time feeding; entry i is the text decoded after i bytes.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_errors(self):
        # Truncated units and unpaired surrogates: strict raises, while
        # "replace" substitutes U+FFFD as listed.
        tests = [
            (b'\xff', '\ufffd'),
            (b'A\x00Z', 'A\ufffd'),
            (b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
            (b'\x00\xd8', '\ufffd'),
            (b'\x00\xd8A', '\ufffd'),
            (b'\x00\xd8A\x00', '\ufffdA'),
            (b'\x00\xdcA\x00', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16le', 'replace'), expected)
    def test_nonbmp(self):
        # Astral characters round-trip through a surrogate pair.
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\x00\xd8\x03\xde')
        self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
                         "\U00010203")
class UTF16BETest(ReadTest, unittest.TestCase):
    """Tests for the explicit big-endian UTF-16 codec (no BOM)."""
    encoding = "utf-16-be"
    # Encoded lone low surrogate (U+DC80); strict decoding must fail.
    ill_formed_sequence = b"\xdc\x80"
    def test_partial(self):
        # Byte-at-a-time feeding; entry i is the text decoded after i bytes.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )
    def test_errors(self):
        # Truncated units and unpaired surrogates: strict raises, while
        # "replace" substitutes U+FFFD as listed.
        tests = [
            (b'\xff', '\ufffd'),
            (b'\x00A\xff', 'A\ufffd'),
            (b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
            (b'\xd8\x00', '\ufffd'),
            (b'\xd8\x00\xdc', '\ufffd'),
            (b'\xd8\x00\x00A', '\ufffdA'),
            (b'\xdc\x00\x00A', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16be', 'replace'), expected)
    def test_nonbmp(self):
        # Astral characters round-trip through a surrogate pair.
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\xd8\x00\xde\x03')
        self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
                         "\U00010203")
class UTF8Test(ReadTest, unittest.TestCase):
    """Tests for the "utf-8" codec; UTF8SigTest reuses these with a BOM."""
    encoding = "utf-8"
    # UTF-8 encoding of a lone low surrogate (U+DC80); strict decoding
    # must fail, and "replace" yields one U+FFFD per ill-formed byte.
    ill_formed_sequence = b"\xed\xb2\x80"
    ill_formed_sequence_replace = "\ufffd" * 3
    # Prefix expected on encoded output; the -sig subclass overrides it.
    BOM = b''
    def test_partial(self):
        # Byte-at-a-time feeding; entry i is the text decoded after i bytes.
        self.check_partial(
            "\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )
    def test_decoder_state(self):
        # getstate()/setstate() must preserve decoding mid-sequence.
        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
        self.check_state_handling_decode(self.encoding,
                                         u, u.encode(self.encoding))
    def test_decode_error(self):
        # Each error handler's treatment of invalid continuation bytes.
        for data, error_handler, expected in (
            (b'[\x80\xff]', 'ignore', '[]'),
            (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
            (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
            (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.decode(self.encoding, error_handler),
                                 expected)
    def test_lone_surrogates(self):
        super().test_lone_surrogates()
        # not sure if this is making sense for
        # UTF-16 and UTF-32
        self.assertEqual("[\uDC80]".encode(self.encoding, "surrogateescape"),
                         self.BOM + b'[\x80]')
        # surrogateescape only covers U+DC80..U+DCFF; other surrogates
        # must still raise, with start/end marking the offending span.
        with self.assertRaises(UnicodeEncodeError) as cm:
            "[\uDC80\uD800\uDFFF]".encode(self.encoding, "surrogateescape")
        exc = cm.exception
        self.assertEqual(exc.object[exc.start:exc.end], '\uD800\uDFFF')
    def test_surrogatepass_handler(self):
        # "surrogatepass" encodes/decodes lone surrogates as-is (CESU-ish).
        self.assertEqual("abc\ud800def".encode(self.encoding, "surrogatepass"),
                         self.BOM + b"abc\xed\xa0\x80def")
        self.assertEqual("\U00010fff\uD800".encode(self.encoding, "surrogatepass"),
                         self.BOM + b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "surrogatepass"),
                         self.BOM + b'[\xed\xa0\x80\xed\xb2\x80]')
        self.assertEqual(b"abc\xed\xa0\x80def".decode(self.encoding, "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode(self.encoding, "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))
        # Truncated surrogate sequences still fail under surrogatepass.
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0".decode(self.encoding, "surrogatepass")
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0z".decode(self.encoding, "surrogatepass")
    def test_incremental_errors(self):
        # Test that the incremental decoder can fail with final=False.
        # See issue #24214
        cases = [b'\x80', b'\xBF', b'\xC0', b'\xC1', b'\xF5', b'\xF6', b'\xFF']
        for prefix in (b'\xC2', b'\xDF', b'\xE0', b'\xE0\xA0', b'\xEF',
                       b'\xEF\xBF', b'\xF0', b'\xF0\x90', b'\xF0\x90\x80',
                       b'\xF4', b'\xF4\x8F', b'\xF4\x8F\xBF'):
            for suffix in b'\x7F', b'\xC0':
                cases.append(prefix + suffix)
        cases.extend((b'\xE0\x80', b'\xE0\x9F', b'\xED\xA0\x80',
                      b'\xED\xBF\xBF', b'\xF0\x80', b'\xF0\x8F', b'\xF4\x90'))
        for data in cases:
            with self.subTest(data=data):
                dec = codecs.getincrementaldecoder(self.encoding)()
                self.assertRaises(UnicodeDecodeError, dec.decode, data)
class UTF7Test(ReadTest, unittest.TestCase):
    """Tests for the "utf-7" codec (RFC 2152 modified base64 sections)."""
    encoding = "utf-7"
    def test_ascii(self):
        # Set D (directly encoded characters)
        set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                 'abcdefghijklmnopqrstuvwxyz'
                 '0123456789'
                 '\'(),-./:?')
        self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii'))
        self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d)
        # Set O (optional direct characters)
        set_o = ' !"#$%&*;<=>@[]^_`{|}'
        self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii'))
        self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o)
        # + is the shift character and must be escaped as "+-"
        self.assertEqual('a+b'.encode(self.encoding), b'a+-b')
        self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b')
        # White spaces
        ws = ' \t\n\r'
        self.assertEqual(ws.encode(self.encoding), ws.encode('ascii'))
        self.assertEqual(ws.encode('ascii').decode(self.encoding), ws)
        # Other ASCII characters
        other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) -
                                     set(set_d + set_o + '+' + ws)))
        self.assertEqual(other_ascii.encode(self.encoding),
                         b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU'
                         b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-')
    def test_partial(self):
        # Byte-at-a-time feeding; entry i is the text decoded after i bytes.
        self.check_partial(
            'a+-b\x00c\x80d\u0100e\U00010000f',
            [
                'a',
                'a',
                'a+',
                'a+-',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b\x00',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c\x80',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d\u0100',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e\U00010000',
                'a+-b\x00c\x80d\u0100e\U00010000f',
            ]
        )
    def test_errors(self):
        # Malformed base64 sections: strict raises; "replace" yields the
        # listed U+FFFD substitutions.
        tests = [
            (b'\xffb', '\ufffdb'),
            (b'a\xffb', 'a\ufffdb'),
            (b'a\xff\xffb', 'a\ufffd\ufffdb'),
            (b'a+IK', 'a\ufffd'),
            (b'a+IK-b', 'a\ufffdb'),
            (b'a+IK,b', 'a\ufffdb'),
            (b'a+IKx', 'a\u20ac\ufffd'),
            (b'a+IKx-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr', 'a\u20ac\ufffd'),
            (b'a+IKwgr-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr,', 'a\u20ac\ufffd'),
            (b'a+IKwgr,-b', 'a\u20ac\ufffd-b'),
            (b'a+IKwgrB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrB-b', 'a\u20ac\u20ac\ufffdb'),
            (b'a+/,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+//,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+IKw-b\xff', 'a\u20acb\ufffd'),
            (b'a+IKw\xffb', 'a\u20ac\ufffdb'),
            (b'a+@b', 'a\ufffdb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
                                  raw, 'strict', True)
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
    def test_nonbmp(self):
        # Astral characters are encoded as a base64'd surrogate pair; the
        # closing "-" is optional when the section ends the string.
        self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0')
        self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0')
        self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-')
        self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0')
        self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0')
        self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding),
                         b'+IKwgrNgB3KA-')
        self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding),
                         '\u20ac\u20ac\U000104A0')
        self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding),
                         '\u20ac\u20ac\U000104A0')
    def test_lone_surrogates(self):
        # A decoded lone high surrogate is kept; truncated/invalid
        # continuations are replaced under "replace".
        tests = [
            (b'a+2AE-b', 'a\ud801b'),
            (b'a+2AE\xffb', 'a\ufffdb'),
            (b'a+2AE', 'a\ufffd'),
            (b'a+2AEA-b', 'a\ufffdb'),
            (b'a+2AH-b', 'a\ufffdb'),
            (b'a+IKzYAQ-b', 'a\u20ac\ud801b'),
            (b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'),
            (b'a+IKzYAQA-b', 'a\u20ac\ufffdb'),
            (b'a+IKzYAd-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'),
            (b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'),
            (b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
class UTF16ExTest(unittest.TestCase):
    """Tests for the low-level codecs.utf_16_ex_decode() helper."""

    def test_errors(self):
        # A truncated (single-byte) input must raise under "strict".
        with self.assertRaises(UnicodeDecodeError):
            codecs.utf_16_ex_decode(b"\xff", "strict", 0, True)

    def test_bad_args(self):
        # The data argument is mandatory.
        with self.assertRaises(TypeError):
            codecs.utf_16_ex_decode()
class ReadBufferTest(unittest.TestCase):
    """Tests for codecs.readbuffer_encode()."""

    def test_array(self):
        # Objects supporting the buffer protocol are accepted.
        import array
        buf = array.array("b", b"spam")
        self.assertEqual(codecs.readbuffer_encode(buf), (b"spam", 4))

    def test_empty(self):
        # An empty input yields empty bytes and a consumed length of zero.
        self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))

    def test_bad_args(self):
        # A missing argument and a non-buffer object are both rejected.
        with self.assertRaises(TypeError):
            codecs.readbuffer_encode()
        with self.assertRaises(TypeError):
            codecs.readbuffer_encode(42)
class UTF8SigTest(UTF8Test, unittest.TestCase):
    """Reruns the UTF-8 tests for "utf-8-sig", which emits a BOM on encode
    and strips a single leading BOM on decode."""
    encoding = "utf-8-sig"
    BOM = codecs.BOM_UTF8
    def test_partial(self):
        # Byte-at-a-time feeding; only the first BOM may be skipped, a
        # second one must come through as U+FEFF.
        self.check_partial(
            "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "",
                "",
                "", # First BOM has been read and skipped
                "",
                "",
                "\ufeff", # Second BOM has been read and emitted
                "\ufeff\x00", # "\x00" read and emitted
                "\ufeff\x00", # First byte of encoded "\xff" read
                "\ufeff\x00\xff", # Second byte of encoded "\xff" read
                "\ufeff\x00\xff", # First byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )
    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")
    def test_bom(self):
        # The incremental decoder must skip the leading BOM.
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = "spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
    def test_stream_bom(self):
        # Reading BOM-prefixed input in chunks of every size must strip
        # exactly one BOM.
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
    def test_stream_bare(self):
        # Input without a BOM must decode unchanged for every chunk size.
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
    """Tests for codecs.escape_decode() (Python string-escape semantics)."""
    def test_empty(self):
        self.assertEqual(codecs.escape_decode(b""), (b"", 0))
        self.assertEqual(codecs.escape_decode(bytearray()), (b"", 0))
    def test_raw(self):
        # Every byte except the backslash passes through unchanged.
        decode = codecs.escape_decode
        for b in range(256):
            b = bytes([b])
            if b != b'\\':
                self.assertEqual(decode(b + b'0'), (b + b'0', 2))
    def test_escape(self):
        # Recognized escapes (line continuation, quotes, control chars,
        # octal, hex); unknown escapes pass through with a warning.
        decode = codecs.escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", b"[]")
        check(br'[\"]', b'["]')
        check(br"[\']", b"[']")
        check(br"[\\]", b"[\\]")
        check(br"[\a]", b"[\x07]")
        check(br"[\b]", b"[\x08]")
        check(br"[\t]", b"[\x09]")
        check(br"[\n]", b"[\x0a]")
        check(br"[\v]", b"[\x0b]")
        check(br"[\f]", b"[\x0c]")
        check(br"[\r]", b"[\x0d]")
        check(br"[\7]", b"[\x07]")
        check(br"[\78]", b"[\x078]")
        check(br"[\41]", b"[!]")
        check(br"[\418]", b"[!8]")
        check(br"[\101]", b"[A]")
        check(br"[\1010]", b"[A0]")
        check(br"[\501]", b"[A]")
        check(br"[\x41]", b"[A]")
        check(br"[\x410]", b"[A0]")
        # Invalid escapes are kept verbatim but deprecated.
        for i in range(97, 123):
            b = bytes([i])
            if b not in b'abfnrtvx':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b, b"\\" + b)
            with self.assertWarns(DeprecationWarning):
                check(b"\\" + b.upper(), b"\\" + b.upper())
        with self.assertWarns(DeprecationWarning):
            check(br"\8", b"\\8")
        with self.assertWarns(DeprecationWarning):
            check(br"\9", b"\\9")
        with self.assertWarns(DeprecationWarning):
            check(b"\\\xfa", b"\\\xfa")
    def test_errors(self):
        # Truncated \x escapes: strict raises; ignore/replace recover.
        decode = codecs.escape_decode
        self.assertRaises(ValueError, decode, br"\x")
        self.assertRaises(ValueError, decode, br"[\x]")
        self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
        self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
        self.assertRaises(ValueError, decode, br"\x0")
        self.assertRaises(ValueError, decode, br"[\x0]")
        self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
        self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
# From RFC 3492
# Each entry is a (unicode, punycode-bytes) pair taken from the RFC's
# sample strings (section 7.1), used by PunycodeTest below.
punycode_testcases = [
    # A Arabic (Egyptian):
    ("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     b"egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    ("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     b"ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    ("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     b"ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    ("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     "\u0065\u0073\u006B\u0079",
     b"Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    ("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     "\u05D1\u05E8\u05D9\u05EA",
     b"4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    ("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     "\u0939\u0948\u0902",
     b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    ("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    ("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     b"psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    ("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     "\u0438",
     b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    ("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     "\u0061\u00F1\u006F\u006C",
     b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    ("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     "\u0056\u0069\u1EC7\u0074",
     b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    ("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     b"3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    ("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     "\u004F\u004E\u004B\u0045\u0059\u0053",
     b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    ("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    ("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     b"2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    ("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     "\u308B\u0035\u79D2\u524D",
     b"MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    ("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     b"de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    ("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     b"d9juau41awczczp"),
    # (S) -> $1.00 <-
    ("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     "\u003C\u002D",
     b"-> $1.00 <--")
    ]
# Sanity check at import time: print any entry that is not a pair.
for i in punycode_testcases:
    if len(i)!=2:
        print(repr(i))
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(
str(uni.encode("punycode"), "ascii").lower(),
str(puny, "ascii").lower()
)
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
puny = puny.decode("ascii").encode("ascii")
self.assertEqual(uni, puny.decode("punycode"))
def test_decode_invalid(self):
testcases = [
(b"xn--w&", "strict", UnicodeError()),
(b"xn--w&", "ignore", "xn-"),
]
for puny, errors, expected in testcases:
with self.subTest(puny=puny, errors=errors):
if isinstance(expected, Exception):
self.assertRaises(UnicodeError, puny.decode, "punycode", errors)
else:
self.assertEqual(puny.decode("punycode", errors), expected)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# Each entry is (input-UTF-8-bytes, expected-UTF-8-bytes); an expected value
# of None means nameprep must reject the input, and (None, None) marks a
# vector skipped on purpose.  Used by NameprepTest below.
nameprep_tests = [
    # 3.1 Map to nothing.
    (b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     b'\xb8\x8f\xef\xbb\xbf',
     b'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    (b'CAFE',
     b'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    (b'\xc3\x9f',
     b'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    (b'\xc4\xb0',
     b'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    (b'\xc5\x83\xcd\xba',
     b'\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    (b'j\xcc\x8c\xc2\xa0\xc2\xaa',
     b'\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    (b'\xe1\xbe\xb7',
     b'\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    (b'\xc7\xb0',
     b'\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    (b'\xce\x90',
     b'\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    (b'\xce\xb0',
     b'\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    (b'\xe1\xba\x96',
     b'\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    (b'\xe1\xbd\x96',
     b'\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (b' ',
     b' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    (b'\xc2\xa0',
     b' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    (b'\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    (b'\xe2\x80\x80',
     b' '),
    # 3.18 Zero Width Space U+200b.
    (b'\xe2\x80\x8b',
     b''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    (b'\xe3\x80\x80',
     b' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    (b'\x10\x7f',
     b'\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    (b'\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    (b'\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    (b'\xef\xbb\xbf',
     b''),
    # 3.24 Non-ASCII control character U+1D175.
    (b'\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    (b'\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    (b'\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    (b'\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    (b'\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    (b'\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    (b'\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    (b'\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    (b'\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    (b'\xcd\x81',
     b'\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    (b'\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    (b'\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    (b'\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    (b'\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    (b'foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    (b'foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    (b'foo\xef\xb9\xb6bar',
     b'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    (b'\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    (b'\xd8\xa71\xd8\xa8',
     b'\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #(b'\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    (b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     b'\xaa\xce\xb0\xe2\x80\x80',
     b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    (b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     b'\x80',
     b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
    ]
class NameprepTest(unittest.TestCase):
    """Runs the RFC 3491 nameprep test vectors against encodings.idna."""

    def test_nameprep(self):
        from encodings.idna import nameprep
        for index, (raw, expected) in enumerate(nameprep_tests):
            if raw is None:
                # Vector disabled above (e.g. UCS-2 limitations).
                continue
            # The vectors are stored as UTF-8 byte strings.
            raw = str(raw, "utf-8", "surrogatepass")
            if expected is None:
                # Input contains prohibited characters; nameprep must reject it.
                self.assertRaises(UnicodeError, nameprep, raw)
                continue
            expected = str(expected, "utf-8", "surrogatepass")
            try:
                self.assertEqual(nameprep(raw), expected)
            except Exception as exc:
                raise support.TestFailed("Test 3.%d: %s" % (index + 1, str(exc)))
class IDNACodecTest(unittest.TestCase):
    """Tests for the "idna" codec, including incremental operation and
    trailing-dot handling."""

    def test_builtin_decode(self):
        # ASCII names pass through; ACE labels are converted back.
        for raw, expected in [
            (b"python.org", "python.org"),
            (b"python.org.", "python.org."),
            (b"xn--pythn-mua.org", "pyth\xf6n.org"),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
        ]:
            self.assertEqual(str(raw, "idna"), expected)

    def test_builtin_encode(self):
        # Non-ASCII labels become ACE ("xn--") labels.
        for text, expected in [
            ("python.org", b"python.org"),
            ("python.org.", b"python.org."),
            ("pyth\xf6n.org", b"xn--pythn-mua.org"),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
        ]:
            self.assertEqual(text.encode("idna"), expected)

    def test_stream(self):
        r = codecs.getreader("idna")(io.BytesIO(b"abc"))
        r.read(3)
        # Nothing may be left over once all input has been consumed.
        self.assertEqual(r.read(), "")

    def test_incremental_decode(self):
        # Decoding one byte at a time must give the same results as
        # decoding the whole input at once.
        def decode_bytewise(data):
            return "".join(codecs.iterdecode((bytes([b]) for b in data),
                                             "idna"))

        self.assertEqual(decode_bytewise(b"python.org"), "python.org")
        self.assertEqual(decode_bytewise(b"python.org."), "python.org.")
        self.assertEqual(decode_bytewise(b"xn--pythn-mua.org."),
                         "pyth\xf6n.org.")
        self.assertEqual(decode_bytewise(b"xn--pythn-mua.org."),
                         "pyth\xf6n.org.")
        # A label is only emitted once its trailing dot has been seen.
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg"), "")
        self.assertEqual(decoder.decode(b"", True), "org")
        # reset() must discard any buffered partial label.
        decoder.reset()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg."), "org.")
        self.assertEqual(decoder.decode(b"", True), "")

    def test_incremental_encode(self):
        # Encoding one character at a time must give the same results as
        # encoding the whole input at once.
        def encode_charwise(text):
            return b"".join(codecs.iterencode(text, "idna"))

        self.assertEqual(encode_charwise("python.org"), b"python.org")
        self.assertEqual(encode_charwise("python.org."), b"python.org.")
        self.assertEqual(encode_charwise("pyth\xf6n.org."),
                         b"xn--pythn-mua.org.")
        self.assertEqual(encode_charwise("pyth\xf6n.org."),
                         b"xn--pythn-mua.org.")
        # A label is only emitted once its trailing dot has been seen.
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
        self.assertEqual(encoder.encode("", True), b"org")
        # reset() must discard any buffered partial label.
        encoder.reset()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
        self.assertEqual(encoder.encode("", True), b"")

    def test_errors(self):
        """Only supports "strict" error handler"""
        # "strict" works in both directions...
        "python.org".encode("idna", "strict")
        b"python.org".decode("idna", "strict")
        # ...but every other handler is rejected.
        for errors in ("ignore", "replace", "backslashreplace",
                       "surrogateescape"):
            with self.assertRaises(Exception):
                "python.org".encode("idna", errors)
            with self.assertRaises(Exception):
                b"python.org".decode("idna", errors)
class CodecsModuleTest(unittest.TestCase):
    """Tests for the top-level functions of the codecs module."""
    def test_decode(self):
        """codecs.decode() honours positional and keyword encoding/errors args."""
        self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
                         '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        self.assertEqual(codecs.decode(b'abc'), 'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')
        # test keywords
        self.assertEqual(codecs.decode(obj=b'\xe4\xf6\xfc', encoding='latin-1'),
                         '\xe4\xf6\xfc')
        self.assertEqual(codecs.decode(b'[\xff]', 'ascii', errors='ignore'),
                         '[]')
    def test_encode(self):
        """codecs.encode() honours positional and keyword encoding/errors args."""
        self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
        self.assertEqual(codecs.encode('abc'), b'abc')
        self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
        # test keywords
        self.assertEqual(codecs.encode(obj='\xe4\xf6\xfc', encoding='latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertEqual(codecs.encode('[\xff]', 'ascii', errors='ignore'),
                         b'[]')
    def test_register(self):
        """codecs.register() rejects missing and non-callable arguments."""
        self.assertRaises(TypeError, codecs.register)
        self.assertRaises(TypeError, codecs.register, 42)
    def test_unregister(self):
        """An unregistered search function is no longer consulted by lookup()."""
        name = "nonexistent_codec_name"
        search_function = mock.Mock()
        codecs.register(search_function)
        # The Mock's return value is not a CodecInfo, so lookup raises
        # TypeError -- but it proves the search function was consulted.
        self.assertRaises(TypeError, codecs.lookup, name)
        search_function.assert_called_with(name)
        search_function.reset_mock()
        codecs.unregister(search_function)
        self.assertRaises(LookupError, codecs.lookup, name)
        search_function.assert_not_called()
    def test_lookup(self):
        """lookup() raises for missing args, unknown names and blank names."""
        self.assertRaises(TypeError, codecs.lookup)
        self.assertRaises(LookupError, codecs.lookup, "__spam__")
        self.assertRaises(LookupError, codecs.lookup, " ")
    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")
    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")
    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")
    def test_lookup_issue1813(self):
        """Codec lookup must be locale-independent (Turkish dotless-i case)."""
        # Issue #1813: under Turkish locales, lookup of some codecs failed
        # because 'I' is lowercased as "ı" (dotless i)
        oldlocale = locale.setlocale(locale.LC_CTYPE)
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        c = codecs.lookup('ASCII')
        self.assertEqual(c.name, 'ascii')
    def test_all(self):
        """codecs.__all__ lists exactly the documented public API names."""
        api = (
            "encode", "decode",
            "register", "CodecInfo", "Codec", "IncrementalEncoder",
            "IncrementalDecoder", "StreamReader", "StreamWriter", "lookup",
            "getencoder", "getdecoder", "getincrementalencoder",
            "getincrementaldecoder", "getreader", "getwriter",
            "register_error", "lookup_error",
            "strict_errors", "replace_errors", "ignore_errors",
            "xmlcharrefreplace_errors", "backslashreplace_errors",
            "namereplace_errors",
            "open", "EncodedFile",
            "iterencode", "iterdecode",
            "BOM", "BOM_BE", "BOM_LE",
            "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_BE", "BOM_UTF16_LE",
            "BOM_UTF32", "BOM_UTF32_BE", "BOM_UTF32_LE",
            "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", # Undocumented
            "StreamReaderWriter", "StreamRecoder",
        )
        self.assertCountEqual(api, codecs.__all__)
        for api in codecs.__all__:
            getattr(codecs, api)
    def test_open(self):
        """codecs.open() returns a StreamReaderWriter for every file mode."""
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        for mode in ('w', 'r', 'r+', 'w+', 'a', 'a+'):
            with self.subTest(mode), \
                    codecs.open(os_helper.TESTFN, mode, 'ascii') as file:
                self.assertIsInstance(file, codecs.StreamReaderWriter)
    def test_undefined(self):
        """The "undefined" codec raises UnicodeError for any input and handler."""
        self.assertRaises(UnicodeError, codecs.encode, 'abc', 'undefined')
        self.assertRaises(UnicodeError, codecs.decode, b'abc', 'undefined')
        self.assertRaises(UnicodeError, codecs.encode, '', 'undefined')
        self.assertRaises(UnicodeError, codecs.decode, b'', 'undefined')
        for errors in ('strict', 'ignore', 'replace', 'backslashreplace'):
            self.assertRaises(UnicodeError,
                codecs.encode, 'abc', 'undefined', errors)
            self.assertRaises(UnicodeError,
                codecs.decode, b'abc', 'undefined', errors)
    def test_file_closes_if_lookup_error_raised(self):
        """codecs.open() must close the file when the encoding lookup fails."""
        mock_open = mock.mock_open()
        with mock.patch('builtins.open', mock_open) as file:
            with self.assertRaises(LookupError):
                codecs.open(os_helper.TESTFN, 'wt', 'invalid-encoding')
            file().close.assert_called()
class StreamReaderTest(unittest.TestCase):
    """Tests for the StreamReader returned by codecs.getreader()."""

    def setUp(self):
        # UTF-8 bytes for two Hangul syllables separated by a newline.
        encoded = b'\xed\x95\x9c\n\xea\xb8\x80'
        self.reader = codecs.getreader('utf-8')
        self.stream = io.BytesIO(encoded)

    def test_readlines(self):
        """readlines() decodes the stream and splits it into text lines."""
        stream_reader = self.reader(self.stream)
        lines = stream_reader.readlines()
        self.assertEqual(lines, ['\ud55c\n', '\uae00'])
class EncodedFileTest(unittest.TestCase):
    """Tests for the codecs.EncodedFile transcoding wrapper."""

    def test_basic(self):
        """Reading transcodes file->data encoding; writing does the reverse."""
        # Read path: UTF-8 bytes on the stream come back re-encoded UTF-16-LE.
        source = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
        wrapped = codecs.EncodedFile(source, 'utf-16-le', 'utf-8')
        self.assertEqual(wrapped.read(), b'\\\xd5\n\x00\x00\xae')
        # Write path: UTF-8 input bytes are stored on the stream as Latin-1.
        sink = io.BytesIO()
        wrapped = codecs.EncodedFile(sink, 'utf-8', 'latin-1')
        wrapped.write(b'\xc3\xbc')
        self.assertEqual(sink.getvalue(), b'\xfc')
# Names of all the Unicode text codecs shipped with CPython; the generic
# round-trip tests below iterate over this list.
all_unicode_encodings = [
    "ascii",
    "big5",
    "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1125",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    "cp932",
    "cp949",
    "cp950",
    "euc_jis_2004",
    "euc_jisx0213",
    "euc_jp",
    "euc_kr",
    "gb18030",
    "gb2312",
    "gbk",
    "hp_roman8",
    "hz",
    "idna",
    "iso2022_jp",
    "iso2022_jp_1",
    "iso2022_jp_2",
    "iso2022_jp_2004",
    "iso2022_jp_3",
    "iso2022_jp_ext",
    "iso2022_kr",
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    "johab",
    "koi8_r",
    "koi8_t",
    "koi8_u",
    "kz1048",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "shift_jis",
    "shift_jis_2004",
    "shift_jisx0213",
    "tis_620",
    "unicode_escape",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]
# Windows-only codecs are present only when the C accelerators exist.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")
if hasattr(codecs, "oem_encode"):
    all_unicode_encodings.append("oem")
# The following encoding is not tested, because it's not supposed
# to work:
#    "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_stateful = [
    "punycode",
]
class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
    """Generic round-trip tests run against every Unicode encoding."""
    def test_basics(self):
        """Encode/decode a simple string through every codec interface flavour."""
        s = "abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            # Canonical name must match the requested name up to -/_ spelling.
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
            (b, size) = codecs.getencoder(encoding)(s)
            self.assertEqual(size, len(s), "encoding=%r" % encoding)
            (chars, size) = codecs.getdecoder(encoding)(b)
            self.assertEqual(chars, s, "encoding=%r" % encoding)
            if encoding not in broken_unicode_with_stateful:
                # check stream reader/writer
                q = Queue(b"")
                writer = codecs.getwriter(encoding)(q)
                encodedresult = b""
                for c in s:
                    writer.write(c)
                    chunk = q.read()
                    self.assertTrue(type(chunk) is bytes, type(chunk))
                    encodedresult += chunk
                q = Queue(b"")
                reader = codecs.getreader(encoding)(q)
                decodedresult = ""
                for c in encodedresult:
                    q.write(bytes([c]))
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
            if encoding not in broken_unicode_with_stateful:
                # check incremental decoder/encoder and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = b""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    encodedresult += encoder.encode("", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += decoder.decode(bytes([c]))
                    decodedresult += decoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)
                    # check iterencode()/iterdecode()
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "encoding=%r" % encoding)
                    # check iterencode()/iterdecode() with empty string
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode("", encoding), encoding))
                    self.assertEqual(result, "")
                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        encoder = codecs.getincrementalencoder(encoding)("ignore")
                    except LookupError: # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(encoder.encode(c) for c in s)
                        decoder = codecs.getincrementaldecoder(encoding)("ignore")
                        decodedresult = "".join(decoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)
    @support.cpython_only
    def test_basics_capi(self):
        """Same incremental round-trips, via the C-API codec constructors."""
        s = "abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            if encoding not in broken_unicode_with_stateful:
                # check incremental decoder/encoder (fetched via the C API)
                try:
                    cencoder = _testcapi.codec_incrementalencoder(encoding)
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check C API
                    encodedresult = b""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode("", True)
                    cdecoder = _testcapi.codec_incrementaldecoder(encoding)
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(bytes([c]))
                    decodedresult += cdecoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)
                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
                    except LookupError: # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(cencoder.encode(c) for c in s)
                        cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
                        decodedresult = "".join(cdecoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)
    def test_seek(self):
        """seek(0) on a StreamReader must reset codec state and buffers."""
        # all codecs should be able to encode these
        s = "%s\n%s\n" % (100*"abc123", 100*"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna": # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_stateful:
                continue
            reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
            for t in range(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                data = reader.read()
                self.assertEqual(s, data)
    def test_bad_decode_args(self):
        """Every decoder rejects a missing argument; most reject non-bytes."""
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            if encoding not in ("idna", "punycode"):
                self.assertRaises(TypeError, decoder, 42)
    def test_bad_encode_args(self):
        """Every encoder rejects a call with no arguments."""
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            self.assertRaises(TypeError, encoder)
    def test_encoding_map_type_initialized(self):
        """Importing a charmap codec and touching its table must not crash."""
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)
    def test_decoder_state(self):
        """getstate()/setstate() round-trips (via MixInCheckStateHandling)."""
        # Check that getstate() and setstate() handle the state properly
        u = "abc123"
        for encoding in all_unicode_encodings:
            if encoding not in broken_unicode_with_stateful:
                self.check_state_handling_decode(encoding, u, u.encode(encoding))
                self.check_state_handling_encode(encoding, u, u.encode(encoding))
class CharmapTest(unittest.TestCase):
    """Tests for codecs.charmap_decode with all three supported map types."""
    def test_decode_with_string_map(self):
        """Decode with a str map: byte value indexes into the string."""
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict", "abc"),
            ("abc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict", "\U0010FFFFbc"),
            ("\U0010FFFFbc", 3)
        )
        # A byte beyond the map, or mapped to U+FFFE, is an error in strict mode.
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab"
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab\ufffe"
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab"),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab\ufffe"),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace", "ab"),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace", "ab\ufffe"),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab"),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab\ufffe"),
            ("ab", 3)
        )
        # An empty map with "ignore" consumes everything and yields nothing.
        allbytes = bytes(range(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", ""),
            ("", len(allbytes))
        )
    def test_decode_with_int2str_map(self):
        """Decode with a {int: str} map; '' drops a byte, None/U+FFFE are errors."""
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'a', 1: 'b', 2: 'c'}),
            ("abc", 3)
        )
        # Multi-character replacement strings are allowed.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'Aa', 1: 'Bb', 2: 'Cc'}),
            ("AaBbCc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: '\U0010FFFF', 1: 'b', 2: 'c'}),
            ("\U0010FFFFbc", 3)
        )
        # Mapping to the empty string silently drops the byte.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'a', 1: 'b', 2: ''}),
            ("ab", 3)
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b'}
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b', 2: None}
        )
        # Issue #14850
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b', 2: '\ufffe'}
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b'}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab\ufffd", 3)
        )
        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: 'a', 1: 'b'}),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab\\x02", 3)
        )
        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b'}),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab", 3)
        )
        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab", 3)
        )
        allbytes = bytes(range(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", {}),
            ("", len(allbytes))
        )
        # Out-of-range code points in the map are a TypeError, not a decode error.
        self.assertRaisesRegex(TypeError,
            "character mapping must be in range\\(0x110000\\)",
            codecs.charmap_decode,
            b"\x00\x01\x02", "strict", {0: "A", 1: 'Bb', 2: -2}
        )
        self.assertRaisesRegex(TypeError,
            "character mapping must be in range\\(0x110000\\)",
            codecs.charmap_decode,
            b"\x00\x01\x02", "strict", {0: "A", 1: 'Bb', 2: 999999999}
        )
    def test_decode_with_int2int_map(self):
        """Decode with a {int: int} map of byte value to code point."""
        a = ord('a')
        b = ord('b')
        c = ord('c')
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: a, 1: b, 2: c}),
            ("abc", 3)
        )
        # Issue #15379
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 0x10FFFF, 1: b, 2: c}),
            ("\U0010FFFFbc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: sys.maxunicode, 1: b, 2: c}),
            (chr(sys.maxunicode) + "bc", 3)
        )
        # Beyond sys.maxunicode the map itself is invalid -> TypeError.
        self.assertRaises(TypeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: sys.maxunicode + 1, 1: b, 2: c}
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: a, 1: b},
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: a, 1: b, 2: 0xFFFE},
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: a, 1: b}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: a, 1: b}),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: a, 1: b}),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab", 3)
        )
class WithStmtTest(unittest.TestCase):
    """Check that codecs wrapper objects work as context managers."""

    def test_encodedfile(self):
        """EncodedFile decodes on read and closes the stream on exit."""
        underlying = io.BytesIO(b"\xc3\xbc")
        with codecs.EncodedFile(underlying, "latin-1", "utf-8") as transcoder:
            self.assertEqual(transcoder.read(), b"\xfc")
        self.assertTrue(underlying.closed)

    def test_streamreaderwriter(self):
        """StreamReaderWriter decodes correctly inside a with-statement."""
        underlying = io.BytesIO(b"\xc3\xbc")
        codec_info = codecs.lookup("utf-8")
        with codecs.StreamReaderWriter(underlying,
                                       codec_info.streamreader,
                                       codec_info.streamwriter,
                                       'strict') as readerwriter:
            self.assertEqual(readerwriter.read(), "\xfc")
class TypesTest(unittest.TestCase):
    """Input-type checks for the low-level codec entry points."""

    def test_decode_unicode(self):
        """Most low-level decoders reject str input with TypeError."""
        decoder_names = [
            "utf_7_decode",
            "utf_8_decode",
            "utf_16_le_decode",
            "utf_16_be_decode",
            "utf_16_ex_decode",
            "utf_32_decode",
            "utf_32_le_decode",
            "utf_32_be_decode",
            "utf_32_ex_decode",
            "latin_1_decode",
            "ascii_decode",
            "charmap_decode",
        ]
        if hasattr(codecs, "mbcs_decode"):
            decoder_names.append("mbcs_decode")
        for name in decoder_names:
            self.assertRaises(TypeError, getattr(codecs, name), "xxx")

    def test_unicode_escape(self):
        """Escape decoders accept str input exactly like ASCII bytes input."""
        for decode in (codecs.unicode_escape_decode,
                       codecs.raw_unicode_escape_decode):
            # str and bytes forms of the same escape give the same result.
            self.assertEqual(decode(r"\u1234"), ("\u1234", 6))
            self.assertEqual(decode(br"\u1234"), ("\u1234", 6))
            # Out-of-range code points honour the error handler.
            self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
            self.assertEqual(decode(r"\U00110000", "replace"), ("\ufffd", 10))
            self.assertEqual(
                decode(r"\U00110000", "backslashreplace"),
                (r"\x5c\x55\x30\x30\x31\x31\x30\x30\x30\x30", 10))
class UnicodeEscapeTest(ReadTest, unittest.TestCase):
    """Tests for the "unicode-escape" codec (Python string-literal escapes)."""
    # Codec name used by the inherited ReadTest machinery.
    encoding = "unicode-escape"
    # Disable the inherited lone-surrogate test; not applicable to this codec.
    test_lone_surrogates = None
    def test_empty(self):
        self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))
    def test_raw_encode(self):
        """Printable ASCII (except backslash) encodes to itself."""
        encode = codecs.unicode_escape_encode
        for b in range(32, 127):
            if b != b'\\'[0]:
                self.assertEqual(encode(chr(b)), (bytes([b]), 1))
    def test_raw_decode(self):
        """Any non-backslash byte decodes to the same code point."""
        decode = codecs.unicode_escape_decode
        for b in range(256):
            if b != b'\\'[0]:
                self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
    def test_escape_encode(self):
        """Control chars, non-ASCII and astral chars are escaped on encode."""
        encode = codecs.unicode_escape_encode
        check = coding_checker(self, encode)
        check('\t', br'\t')
        check('\n', br'\n')
        check('\r', br'\r')
        check('\\', br'\\')
        for b in range(32):
            if chr(b) not in '\t\n\r':
                check(chr(b), ('\\x%02x' % b).encode())
        for b in range(127, 256):
            check(chr(b), ('\\x%02x' % b).encode())
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')
    def test_escape_decode(self):
        """All escape forms decode; unknown escapes warn DeprecationWarning."""
        decode = codecs.unicode_escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", "[]")
        check(br'[\"]', '["]')
        check(br"[\']", "[']")
        check(br"[\\]", r"[\]")
        check(br"[\a]", "[\x07]")
        check(br"[\b]", "[\x08]")
        check(br"[\t]", "[\x09]")
        check(br"[\n]", "[\x0a]")
        check(br"[\v]", "[\x0b]")
        check(br"[\f]", "[\x0c]")
        check(br"[\r]", "[\x0d]")
        check(br"[\7]", "[\x07]")
        check(br"[\78]", "[\x078]")
        check(br"[\41]", "[!]")
        check(br"[\418]", "[!8]")
        check(br"[\101]", "[A]")
        check(br"[\1010]", "[A0]")
        check(br"[\x41]", "[A]")
        check(br"[\x410]", "[A0]")
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
        # Unrecognized escapes are passed through but deprecated.
        for i in range(97, 123):
            b = bytes([i])
            if b not in b'abfnrtuvx':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b, "\\" + chr(i))
            if b.upper() not in b'UN':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b.upper(), "\\" + chr(i-32))
        with self.assertWarns(DeprecationWarning):
            check(br"\8", "\\8")
        with self.assertWarns(DeprecationWarning):
            check(br"\9", "\\9")
        with self.assertWarns(DeprecationWarning):
            check(b"\\\xfa", "\\\xfa")
    def test_decode_errors(self):
        """Truncated or out-of-range escapes honour the error handler."""
        decode = codecs.unicode_escape_decode
        for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
    def test_partial(self):
        """Feed one byte at a time; expected prefix after each byte (ReadTest)."""
        self.check_partial(
            "\x00\t\n\r\\\xff\uffff\U00010000",
            [
                '',
                '',
                '',
                '\x00',
                '\x00',
                '\x00\t',
                '\x00\t',
                '\x00\t\n',
                '\x00\t\n',
                '\x00\t\n\r',
                '\x00\t\n\r',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff\U00010000',
            ]
        )
class RawUnicodeEscapeTest(ReadTest, unittest.TestCase):
    """Tests for "raw-unicode-escape": only \\uXXXX/\\UXXXXXXXX are special."""
    # Codec name used by the inherited ReadTest machinery.
    encoding = "raw-unicode-escape"
    # Disable the inherited lone-surrogate test; not applicable to this codec.
    test_lone_surrogates = None
    def test_empty(self):
        self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))
    def test_raw_encode(self):
        """Every Latin-1 code point encodes to its own byte unchanged."""
        encode = codecs.raw_unicode_escape_encode
        for b in range(256):
            self.assertEqual(encode(chr(b)), (bytes([b]), 1))
    def test_raw_decode(self):
        """Every byte decodes to the same code point unchanged."""
        decode = codecs.raw_unicode_escape_decode
        for b in range(256):
            self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
    def test_escape_encode(self):
        """Only non-Latin-1 characters are turned into \\u/\\U escapes."""
        encode = codecs.raw_unicode_escape_encode
        check = coding_checker(self, encode)
        for b in range(256):
            if b not in b'uU':
                check('\\' + chr(b), b'\\' + bytes([b]))
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')
    def test_escape_decode(self):
        """Backslash followed by anything but u/U is passed through literally."""
        decode = codecs.raw_unicode_escape_decode
        check = coding_checker(self, decode)
        for b in range(256):
            if b not in b'uU':
                check(b'\\' + bytes([b]), '\\' + chr(b))
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
    def test_decode_errors(self):
        """Truncated or out-of-range \\u/\\U escapes honour the error handler."""
        decode = codecs.raw_unicode_escape_decode
        for c, d in (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
    def test_partial(self):
        """Feed one byte at a time; expected prefix after each byte (ReadTest)."""
        self.check_partial(
            "\x00\t\n\r\\\xff\uffff\U00010000",
            [
                '\x00',
                '\x00\t',
                '\x00\t\n',
                '\x00\t\n\r',
                '\x00\t\n\r',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff\U00010000',
            ]
        )
class EscapeEncodeTest(unittest.TestCase):
    """Tests for the low-level codecs.escape_encode() helper."""

    def test_escape_encode(self):
        """escape_encode() escapes bytes and reports the consumed length."""
        # (input, (escaped output, number of input bytes consumed))
        cases = (
            (b'', (b'', 0)),
            (b'foobar', (b'foobar', 6)),
            (b'spam\0eggs', (b'spam\\x00eggs', 9)),
            (b'a\'b', (b"a\\'b", 3)),
            (b'b\\c', (b'b\\\\c', 3)),
            (b'c\nd', (b'c\\nd', 3)),
            (b'd\re', (b'd\\re', 3)),
            (b'f\x7fg', (b'f\\x7fg', 3)),
        )
        for raw, expected in cases:
            with self.subTest(data=raw):
                self.assertEqual(codecs.escape_encode(raw), expected)
        # Only bytes objects are accepted -- not str, not bytearray.
        self.assertRaises(TypeError, codecs.escape_encode, 'spam')
        self.assertRaises(TypeError, codecs.escape_encode, bytearray(b'spam'))
class SurrogateEscapeTest(unittest.TestCase):
    """Round-trip tests for the "surrogateescape" error handler."""

    def _check_roundtrip(self, encoding, raw, text):
        # Undecodable bytes must surface as lone low surrogates, and encoding
        # those surrogates back must restore the original bytes exactly.
        self.assertEqual(raw.decode(encoding, "surrogateescape"), text)
        self.assertEqual(text.encode(encoding, "surrogateescape"), raw)

    def test_utf8(self):
        # A stray 0x80 byte, then an ill-formed UTF-8-encoded surrogate.
        self._check_roundtrip("utf-8", b"foo\x80bar", "foo\udc80bar")
        self._check_roundtrip("utf-8", b"\xed\xb0\x80", "\udced\udcb0\udc80")

    def test_ascii(self):
        # Any byte >= 0x80 is bad in ASCII.
        self._check_roundtrip("ascii", b"foo\x80bar", "foo\udc80bar")

    def test_charmap(self):
        # \xa5 is unmapped in iso-8859-3.
        self._check_roundtrip("iso-8859-3", b"foo\xa5bar", "foo\udca5bar")

    def test_latin1(self):
        # Issue6373: encoding lone surrogates with latin-1 restores the bytes.
        self.assertEqual(
            "\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1",
                                                    "surrogateescape"),
            b"\xe4\xeb\xef\xf6\xfc")
class BomTest(unittest.TestCase):
    """BOM emission rules for codecs.open() with UTF-16/UTF-32 variants."""
    def test_seek0(self):
        """A BOM is written once, re-written after seek(0), never mid-file."""
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # Check that the BOM is written after a seek(0)
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # (StreamWriter) Check that the BOM is written after a seek(0)
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # Check that the BOM is not written after a seek() at a position
            # different than the start
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # (StreamWriter) Check that the BOM is not written after a seek()
            # at a position different than the start
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
# Binary->binary transform codecs available on every build; zlib_codec and
# bz2_codec are appended below when the compression modules import cleanly.
bytes_transform_encodings = [
    "base64_codec",
    "uu_codec",
    "quopri_codec",
    "hex_codec",
]
# Alternative lookup names that must resolve to the same canonical codec.
transform_aliases = {
    "base64_codec": ["base64", "base_64"],
    "uu_codec": ["uu"],
    "quopri_codec": ["quopri", "quoted_printable", "quotedprintable"],
    "hex_codec": ["hex"],
    "rot_13": ["rot13"],
}
try:
    import zlib
except ImportError:
    # Keep the name bound: it is referenced by a skipUnless decorator below.
    zlib = None
else:
    bytes_transform_encodings.append("zlib_codec")
    transform_aliases["zlib_codec"] = ["zip", "zlib"]
try:
    import bz2
except ImportError:
    pass
else:
    bytes_transform_encodings.append("bz2_codec")
    transform_aliases["bz2_codec"] = ["bz2"]
class TransformCodecTest(unittest.TestCase):
    def test_basics(self):
        """Every transform codec must round-trip all 256 byte values."""
        binput = bytes(range(256))
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                # generic codecs interface
                (o, size) = codecs.getencoder(encoding)(binput)
                self.assertEqual(size, len(binput))
                (i, size) = codecs.getdecoder(encoding)(o)
                self.assertEqual(size, len(o))
                self.assertEqual(i, binput)
    def test_read(self):
        """StreamReader.read() decodes a transform-encoded stream."""
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.read()
                self.assertEqual(sout, b"\x80")
    def test_readline(self):
        """StreamReader.readline() also works for binary transform codecs."""
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.readline()
                self.assertEqual(sout, b"\x80")
    def test_buffer_api_usage(self):
        """memoryview input is accepted and encodes/decodes like bytes."""
        # We check all the transform codecs accept memoryview input
        # for encoding and decoding
        # and also that they roundtrip correctly
        original = b"12345\x80"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                data = original
                view = memoryview(data)
                data = codecs.encode(data, encoding)
                view_encoded = codecs.encode(view, encoding)
                self.assertEqual(view_encoded, data)
                view = memoryview(data)
                data = codecs.decode(data, encoding)
                self.assertEqual(data, original)
                view_decoded = codecs.decode(view, encoding)
                self.assertEqual(view_decoded, data)
    def test_text_to_binary_denylists_binary_transforms(self):
        """str.encode() on a binary transform fails with a helpful LookupError."""
        # Check binary -> binary codecs give a good error for str input
        bad_input = "bad input type"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.encode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.encode(encoding)
                # The denylist error must not chain another exception.
                self.assertIsNone(failure.exception.__cause__)
    def test_text_to_binary_denylists_text_transforms(self):
        """str.encode() on a str->str codec fails with a helpful LookupError."""
        # Check str.encode gives a good error message for str -> str codecs
        msg = (r"^'rot_13' is not a text encoding; "
               r"use codecs.encode\(\) to handle arbitrary codecs")
        with self.assertRaisesRegex(LookupError, msg):
            "just an example message".encode("rot_13")
    def test_binary_to_text_denylists_binary_transforms(self):
        """bytes/bytearray .decode() on binary transforms raise LookupError."""
        # Check bytes.decode and bytearray.decode give a good error
        # message for binary -> binary codecs
        data = b"encode first to ensure we meet any format restrictions"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                encoded_data = codecs.encode(data, encoding)
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    encoded_data.decode(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    bytearray(encoded_data).decode(encoding)
def test_binary_to_text_denylists_text_transforms(self):
# Check str -> str codec gives a good error for binary input
for bad_input in (b"immutable", bytearray(b"mutable")):
with self.subTest(bad_input=bad_input):
msg = (r"^'rot_13' is not a text encoding; "
r"use codecs.decode\(\) to handle arbitrary codecs")
with self.assertRaisesRegex(LookupError, msg) as failure:
bad_input.decode("rot_13")
self.assertIsNone(failure.exception.__cause__)
@unittest.skipUnless(zlib, "Requires zlib support")
def test_custom_zlib_error_is_wrapped(self):
# Check zlib codec gives a good error for malformed input
msg = "^decoding with 'zlib_codec' codec failed"
with self.assertRaisesRegex(Exception, msg) as failure:
codecs.decode(b"hello", "zlib_codec")
self.assertIsInstance(failure.exception.__cause__,
type(failure.exception))
def test_custom_hex_error_is_wrapped(self):
# Check hex codec gives a good error for malformed input
msg = "^decoding with 'hex_codec' codec failed"
with self.assertRaisesRegex(Exception, msg) as failure:
codecs.decode(b"hello", "hex_codec")
self.assertIsInstance(failure.exception.__cause__,
type(failure.exception))
# Unfortunately, the bz2 module throws OSError, which the codec
# machinery currently can't wrap :(
# Ensure codec aliases from http://bugs.python.org/issue7475 work
def test_aliases(self):
for codec_name, aliases in transform_aliases.items():
expected_name = codecs.lookup(codec_name).name
for alias in aliases:
with self.subTest(alias=alias):
info = codecs.lookup(alias)
self.assertEqual(info.name, expected_name)
def test_quopri_stateless(self):
# Should encode with quotetabs=True
encoded = codecs.encode(b"space tab\teol \n", "quopri-codec")
self.assertEqual(encoded, b"space=20tab=09eol=20\n")
# But should still support unescaped tabs and spaces
unescaped = b"space tab eol\n"
self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped)
def test_uu_invalid(self):
# Missing "begin" line
self.assertRaises(ValueError, codecs.decode, b"", "uu-codec")
# The codec system tries to wrap exceptions in order to ensure the error
# mentions the operation being performed and the codec involved. We
# currently *only* want this to happen for relatively stateless
# exceptions, where the only significant information they contain is their
# type and a single str argument.
# Use a local codec registry to avoid appearing to leak objects when
# registering multiple search functions
_TEST_CODECS = {}
def _get_test_codec(codec_name):
return _TEST_CODECS.get(codec_name)
class ExceptionChainingTest(unittest.TestCase):
    """Check when the codec machinery wraps exceptions raised by a codec.

    Wrapping rewrites the message to mention the operation and codec name
    and chains the original exception as ``__cause__``.  It is only done
    for "stateless" exceptions whose only significant state is their type
    and a single str argument; anything richer must pass through untouched.
    A controllable test codec is installed per-test via the module-local
    ``_TEST_CODECS`` registry.
    """
    def setUp(self):
        self.codec_name = 'exception_chaining_test'
        codecs.register(_get_test_codec)
        self.addCleanup(codecs.unregister, _get_test_codec)
        # We store the object to raise on the instance because of a bad
        # interaction between the codec caching (which means we can't
        # recreate the codec entry) and regrtest refleak hunting (which
        # runs the same test instance multiple times). This means we
        # need to ensure the codecs call back in to the instance to find
        # out which exception to raise rather than binding them in a
        # closure to an object that may change on the next run
        self.obj_to_raise = RuntimeError
    def tearDown(self):
        _TEST_CODECS.pop(self.codec_name, None)
        # Issue #22166: Also pop from caches to avoid appearance of ref leaks
        encodings._cache.pop(self.codec_name, None)
    def set_codec(self, encode, decode):
        # Install *encode*/*decode* as the test codec's implementation.
        codec_info = codecs.CodecInfo(encode, decode,
                                      name=self.codec_name)
        _TEST_CODECS[self.codec_name] = codec_info
    @contextlib.contextmanager
    def assertWrapped(self, operation, exc_type, msg):
        # Context manager asserting the block raises *exc_type* wrapped by
        # the codec machinery: message mentions operation+codec, and the
        # original exception is chained with its traceback intact.
        full_msg = r"{} with {!r} codec failed \({}: {}\)".format(
                        operation, self.codec_name, exc_type.__name__, msg)
        with self.assertRaisesRegex(exc_type, full_msg) as caught:
            yield caught
        self.assertIsInstance(caught.exception.__cause__, exc_type)
        self.assertIsNotNone(caught.exception.__cause__.__traceback__)
    def raise_obj(self, *args, **kwds):
        # Helper to dynamically change the object raised by a test codec
        raise self.obj_to_raise
    def check_wrapped(self, obj_to_raise, msg, exc_type=RuntimeError):
        # All four encode/decode entry points must wrap *obj_to_raise*.
        self.obj_to_raise = obj_to_raise
        self.set_codec(self.raise_obj, self.raise_obj)
        with self.assertWrapped("encoding", exc_type, msg):
            "str_input".encode(self.codec_name)
        with self.assertWrapped("encoding", exc_type, msg):
            codecs.encode("str_input", self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_raise_by_type(self):
        self.check_wrapped(RuntimeError, "")
    def test_raise_by_value(self):
        msg = "This should be wrapped"
        self.check_wrapped(RuntimeError(msg), msg)
    def test_raise_grandchild_subclass_exact_size(self):
        # A subclass with no extra state (__slots__ empty) is still wrapped.
        msg = "This should be wrapped"
        class MyRuntimeError(RuntimeError):
            __slots__ = ()
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
    def test_raise_subclass_with_weakref_support(self):
        msg = "This should be wrapped"
        class MyRuntimeError(RuntimeError):
            pass
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
    def check_not_wrapped(self, obj_to_raise, msg):
        # All four entry points must propagate *obj_to_raise* unwrapped.
        def raise_obj(*args, **kwds):
            raise obj_to_raise
        self.set_codec(raise_obj, raise_obj)
        with self.assertRaisesRegex(RuntimeError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_init_override_is_not_wrapped(self):
        # Overriding __init__ means the exception may carry extra state.
        class CustomInit(RuntimeError):
            def __init__(self):
                pass
        self.check_not_wrapped(CustomInit, "")
    def test_new_override_is_not_wrapped(self):
        # Overriding __new__ likewise disqualifies the exception.
        class CustomNew(RuntimeError):
            def __new__(cls):
                return super().__new__(cls)
        self.check_not_wrapped(CustomNew, "")
    def test_instance_attribute_is_not_wrapped(self):
        # An instance attribute is extra state — must not be wrapped.
        msg = "This should NOT be wrapped"
        exc = RuntimeError(msg)
        exc.attr = 1
        self.check_not_wrapped(exc, "^{}$".format(msg))
    def test_non_str_arg_is_not_wrapped(self):
        self.check_not_wrapped(RuntimeError(1), "1")
    def test_multiple_args_is_not_wrapped(self):
        msg_re = r"^\('a', 'b', 'c'\)$"
        self.check_not_wrapped(RuntimeError('a', 'b', 'c'), msg_re)
    # http://bugs.python.org/issue19609
    def test_codec_lookup_failure_not_wrapped(self):
        msg = "^unknown encoding: {}$".format(self.codec_name)
        # The initial codec lookup should not be wrapped
        with self.assertRaisesRegex(LookupError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_unflagged_non_text_codec_handling(self):
        # The stdlib non-text codecs are now marked so they're
        # pre-emptively skipped by the text model related methods
        # However, third party codecs won't be flagged, so we still make
        # sure the case where an inappropriate output type is produced is
        # handled appropriately
        def encode_to_str(*args, **kwds):
            return "not bytes!", 0
        def decode_to_bytes(*args, **kwds):
            return b"not str!", 0
        self.set_codec(encode_to_str, decode_to_bytes)
        # No input or output type checks on the codecs module functions
        encoded = codecs.encode(None, self.codec_name)
        self.assertEqual(encoded, "not bytes!")
        decoded = codecs.decode(None, self.codec_name)
        self.assertEqual(decoded, b"not str!")
        # Text model methods should complain
        fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
               r"use codecs.encode\(\) to encode to arbitrary types$")
        msg = fmt.format(self.codec_name)
        with self.assertRaisesRegex(TypeError, msg):
            "str_input".encode(self.codec_name)
        fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
               r"use codecs.decode\(\) to decode to arbitrary types$")
        msg = fmt.format(self.codec_name)
        with self.assertRaisesRegex(TypeError, msg):
            b"bytes input".decode(self.codec_name)
@unittest.skipUnless(sys.platform == 'win32',
                     'code pages are specific to Windows')
class CodePageTest(unittest.TestCase):
    """Tests for the Windows code page codecs (codecs.code_page_*)."""
    # Windows code page number for UTF-8.
    CP_UTF8 = 65001
    def test_invalid_code_page(self):
        self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
        self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
        self.assertRaises(OSError, codecs.code_page_encode, 123, 'a')
        self.assertRaises(OSError, codecs.code_page_decode, 123, b'a')
    def test_code_page_name(self):
        # The code page name must appear in the Unicode error message.
        self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
                               codecs.code_page_encode, 932, '\xff')
        self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
                               codecs.code_page_decode, 932, b'\x81\x00', 'strict', True)
        self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
                               codecs.code_page_decode, self.CP_UTF8, b'\xff', 'strict', True)
    def check_decode(self, cp, tests):
        """Drive code_page_decode over (raw, errors, expected) triples.

        ``expected is None`` means the decode must raise UnicodeDecodeError.
        """
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = codecs.code_page_decode(cp, raw, errors, True)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from "cp%s" with '
                              'errors=%r: %s' % (raw, cp, errors, err))
                self.assertEqual(decoded[0], expected,
                                 '%a.decode("cp%s", %r)=%a != %a'
                                 % (raw, cp, errors, decoded[0], expected))
                # assert 0 <= decoded[1] <= len(raw)
                self.assertGreaterEqual(decoded[1], 0)
                self.assertLessEqual(decoded[1], len(raw))
            else:
                self.assertRaises(UnicodeDecodeError,
                                  codecs.code_page_decode, cp, raw, errors, True)
    def check_encode(self, cp, tests):
        """Drive code_page_encode over (text, errors, expected) triples.

        ``expected is None`` means the encode must raise UnicodeEncodeError.
        """
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = codecs.code_page_encode(cp, text, errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to "cp%s" with '
                              'errors=%r: %s' % (text, cp, errors, err))
                self.assertEqual(encoded[0], expected,
                                 '%a.encode("cp%s", %r)=%a != %a'
                                 % (text, cp, errors, encoded[0], expected))
                self.assertEqual(encoded[1], len(text))
            else:
                self.assertRaises(UnicodeEncodeError,
                                  codecs.code_page_encode, cp, text, errors)
    def test_cp932(self):
        self.check_encode(932, (
            ('abc', 'strict', b'abc'),
            ('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
            # test error handlers
            ('\xff', 'strict', None),
            ('[\xff]', 'ignore', b'[]'),
            ('[\xff]', 'replace', b'[y]'),
            ('[\u20ac]', 'replace', b'[?]'),
            ('[\xff]', 'backslashreplace', b'[\\xff]'),
            ('[\xff]', 'namereplace',
             b'[\\N{LATIN SMALL LETTER Y WITH DIAERESIS}]'),
            # FIX: restored the literal XML character reference emitted by
            # 'xmlcharrefreplace'; a previous revision had the entity
            # decoded to a raw non-ASCII char, which is a SyntaxError
            # inside a bytes literal.
            ('[\xff]', 'xmlcharrefreplace', b'[&#255;]'),
            ('\udcff', 'strict', None),
            ('[\udcff]', 'surrogateescape', b'[\xff]'),
            ('[\udcff]', 'surrogatepass', None),
        ))
        self.check_decode(932, (
            (b'abc', 'strict', 'abc'),
            (b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
            # invalid bytes
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'backslashreplace', '[\\xff]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
            (b'[\xff]', 'surrogatepass', None),
            (b'\x81\x00abc', 'strict', None),
            (b'\x81\x00abc', 'ignore', '\x00abc'),
            (b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
            (b'\x81\x00abc', 'backslashreplace', '\\x81\x00abc'),
        ))
    def test_cp1252(self):
        self.check_encode(1252, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'\xe9\x80'),
            ('\xff', 'strict', b'\xff'),
            # test error handlers
            ('\u0141', 'strict', None),
            ('\u0141', 'ignore', b''),
            ('\u0141', 'replace', b'L'),
            ('\udc98', 'surrogateescape', b'\x98'),
            ('\udc98', 'surrogatepass', None),
        ))
        self.check_decode(1252, (
            (b'abc', 'strict', 'abc'),
            (b'\xe9\x80', 'strict', '\xe9\u20ac'),
            (b'\xff', 'strict', '\xff'),
        ))
    def test_cp_utf7(self):
        cp = 65000
        self.check_encode(cp, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'+AOkgrA-'),
            ('\U0010ffff', 'strict', b'+2//f/w-'),
            ('\udc80', 'strict', b'+3IA-'),
            ('\ufffd', 'strict', b'+//0-'),
        ))
        self.check_decode(cp, (
            (b'abc', 'strict', 'abc'),
            (b'+AOkgrA-', 'strict', '\xe9\u20ac'),
            (b'+2//f/w-', 'strict', '\U0010ffff'),
            (b'+3IA-', 'strict', '\udc80'),
            (b'+//0-', 'strict', '\ufffd'),
            # invalid bytes
            (b'[+/]', 'strict', '[]'),
            (b'[\xff]', 'strict', '[\xff]'),
        ))
    def test_multibyte_encoding(self):
        self.check_decode(932, (
            (b'\x84\xe9\x80', 'ignore', '\u9a3e'),
            (b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
        ))
        self.check_decode(self.CP_UTF8, (
            (b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
            (b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
        ))
        self.check_encode(self.CP_UTF8, (
            ('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
            ('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
        ))
    def test_code_page_decode_flags(self):
        # Issue #36312: For some code pages (e.g. UTF-7) flags for
        # MultiByteToWideChar() must be set to 0.
        if support.verbose:
            sys.stdout.write('\n')
        for cp in (50220, 50221, 50222, 50225, 50227, 50229,
                   *range(57002, 57011+1), 65000):
            # On small versions of Windows like Windows IoT
            # not all codepages are present.
            # A missing codepage causes an OSError exception
            # so check for the codepage before decoding
            if is_code_page_present(cp):
                self.assertEqual(codecs.code_page_decode(cp, b'abc'), ('abc', 3), f'cp{cp}')
            else:
                if support.verbose:
                    print(f"  skipping cp={cp}")
        self.assertEqual(codecs.code_page_decode(42, b'abc'),
                         ('\uf061\uf062\uf063', 3))
    def test_incremental(self):
        # final=False: a trailing lead byte is buffered, not an error.
        decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
        self.assertEqual(decoded, ('', 0))
        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e', 2))
        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9\x80', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))
        decoded = codecs.code_page_decode(932,
                                          b'abc', 'strict',
                                          False)
        self.assertEqual(decoded, ('abc', 3))
    def test_mbcs_alias(self):
        # Check that looking up our 'default' codepage will return
        # mbcs when we don't have a more specific one available
        with mock.patch('_winapi.GetACP', return_value=123):
            codec = codecs.lookup('cp123')
            self.assertEqual(codec.name, 'mbcs')
    @support.bigmemtest(size=2**31, memuse=7, dry_run=False)
    def test_large_input(self, size):
        # Test input longer than INT_MAX.
        # Input should contain undecodable bytes before and after
        # the INT_MAX limit.
        encoded = (b'01234567' * ((size//8)-1) +
                   b'\x85\x86\xea\xeb\xec\xef\xfc\xfd\xfe\xff')
        self.assertEqual(len(encoded), size+2)
        decoded = codecs.code_page_decode(932, encoded, 'surrogateescape', True)
        self.assertEqual(decoded[1], len(encoded))
        del encoded
        self.assertEqual(len(decoded[0]), decoded[1])
        self.assertEqual(decoded[0][:10], '0123456701')
        self.assertEqual(decoded[0][-20:],
                         '6701234567'
                         '\udc85\udc86\udcea\udceb\udcec'
                         '\udcef\udcfc\udcfd\udcfe\udcff')
    @support.bigmemtest(size=2**31, memuse=6, dry_run=False)
    def test_large_utf8_input(self, size):
        # Test input longer than INT_MAX.
        # Input should contain a decodable multi-byte character
        # surrounding INT_MAX
        encoded = (b'0123456\xed\x84\x80' * (size//8))
        self.assertEqual(len(encoded), size // 8 * 10)
        decoded = codecs.code_page_decode(65001, encoded, 'ignore', True)
        self.assertEqual(decoded[1], len(encoded))
        del encoded
        self.assertEqual(len(decoded[0]), size)
        self.assertEqual(decoded[0][:10], '0123456\ud10001')
        self.assertEqual(decoded[0][-11:], '56\ud1000123456\ud100')
class ASCIITest(unittest.TestCase):
    """Exercise the ascii codec, including its error handlers."""
    def test_encode(self):
        self.assertEqual('abc123'.encode('ascii'), b'abc123')
    def test_encode_error(self):
        # Each error handler has a well-defined replacement for the
        # unencodable characters.
        for data, error_handler, expected in (
            ('[\x80\xff\u20ac]', 'ignore', b'[]'),
            ('[\x80\xff\u20ac]', 'replace', b'[???]'),
            # FIX: restored the literal XML character references emitted by
            # 'xmlcharrefreplace'; a previous revision had them entity-decoded
            # to raw non-ASCII chars, which is a SyntaxError in a bytes literal.
            ('[\x80\xff\u20ac]', 'xmlcharrefreplace', b'[&#128;&#255;&#8364;]'),
            ('[\x80\xff\u20ac\U000abcde]', 'backslashreplace',
             b'[\\x80\\xff\\u20ac\\U000abcde]'),
            ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.encode('ascii', error_handler),
                                 expected)
    def test_encode_surrogateescape_error(self):
        with self.assertRaises(UnicodeEncodeError):
            # the first character can be decoded, but not the second
            '\udc80\xff'.encode('ascii', 'surrogateescape')
    def test_decode(self):
        self.assertEqual(b'abc'.decode('ascii'), 'abc')
    def test_decode_error(self):
        # Non-ASCII bytes are rejected or replaced per the error handler.
        for data, error_handler, expected in (
            (b'[\x80\xff]', 'ignore', '[]'),
            (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
            (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
            (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.decode('ascii', error_handler),
                                 expected)
class Latin1Test(unittest.TestCase):
    """Exercise the latin1 codec, including its error handlers."""
    def test_encode(self):
        # All code points <= U+00FF encode to the identical byte values.
        for data, expected in (
            ('abc', b'abc'),
            ('\x80\xe9\xff', b'\x80\xe9\xff'),
        ):
            with self.subTest(data=data, expected=expected):
                self.assertEqual(data.encode('latin1'), expected)
    def test_encode_errors(self):
        for data, error_handler, expected in (
            ('[\u20ac\udc80]', 'ignore', b'[]'),
            ('[\u20ac\udc80]', 'replace', b'[??]'),
            ('[\u20ac\U000abcde]', 'backslashreplace',
             b'[\\u20ac\\U000abcde]'),
            # FIX: restored the literal XML character references emitted by
            # 'xmlcharrefreplace'; a previous revision had them entity-decoded
            # to raw non-ASCII chars, which is a SyntaxError in a bytes literal.
            ('[\u20ac\udc80]', 'xmlcharrefreplace', b'[&#8364;&#56448;]'),
            ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.encode('latin1', error_handler),
                                 expected)
    def test_encode_surrogateescape_error(self):
        with self.assertRaises(UnicodeEncodeError):
            # the first character can be decoded, but not the second
            '\udc80\u20ac'.encode('latin1', 'surrogateescape')
    def test_decode(self):
        # Every byte value decodes to the code point of the same number.
        for data, expected in (
            (b'abc', 'abc'),
            (b'[\x80\xff]', '[\x80\xff]'),
        ):
            with self.subTest(data=data, expected=expected):
                self.assertEqual(data.decode('latin1'), expected)
class StreamRecoderTest(unittest.TestCase):
    """Tests for codecs.StreamRecoder and the codecs.EncodedFile helper."""
    def test_writelines(self):
        buffer = io.BytesIO()
        ascii_codec = codecs.lookup('ascii')
        recoder = codecs.StreamRecoder(
            buffer, ascii_codec.encode, ascii_codec.decode,
            encodings.ascii.StreamReader, encodings.ascii.StreamWriter)
        recoder.writelines([b'a', b'b'])
        self.assertEqual(buffer.getvalue(), b'ab')
    def test_write(self):
        buffer = io.BytesIO()
        latin1 = codecs.lookup('latin1')
        # Recode from Latin-1 to utf-8.
        recoder = codecs.StreamRecoder(
            buffer, latin1.encode, latin1.decode,
            encodings.utf_8.StreamReader, encodings.utf_8.StreamWriter)
        text = 'àñé'
        recoder.write(text.encode('latin1'))
        self.assertEqual(buffer.getvalue(), text.encode('utf-8'))
    def test_seeking_read(self):
        raw = io.BytesIO('line1\nline2\nline3\n'.encode('utf-16-le'))
        recoded = codecs.EncodedFile(raw, 'utf-8', 'utf-16-le')
        self.assertEqual(recoded.readline(), b'line1\n')
        recoded.seek(0)
        # After rewinding, all lines read back from the start.
        for expected in (b'line1\n', b'line2\n', b'line3\n', b''):
            self.assertEqual(recoded.readline(), expected)
    def test_seeking_write(self):
        raw = io.BytesIO('123456789\n'.encode('utf-16-le'))
        recoded = codecs.EncodedFile(raw, 'utf-8', 'utf-16-le')
        # Test that seek() only resets its internal buffer when offset
        # and whence are zero.
        recoded.seek(2)
        recoded.write(b'\nabc\n')
        self.assertEqual(recoded.readline(), b'789\n')
        recoded.seek(0)
        for expected in (b'1\n', b'abc\n', b'789\n'):
            self.assertEqual(recoded.readline(), expected)
@unittest.skipIf(_testcapi is None, 'need _testcapi module')
class LocaleCodecTest(unittest.TestCase):
    """
    Test indirectly _Py_DecodeUTF8Ex() and _Py_EncodeUTF8Ex().
    """
    # Encoding under test: the interpreter's filesystem encoding.
    ENCODING = sys.getfilesystemencoding()
    # Sample texts covering ASCII, Latin-1 range, the full UCS range,
    # and lone surrogates.
    STRINGS = ("ascii", "ulatin1:\xa7\xe9",
               "u255:\xff",
               "UCS:\xe9\u20ac\U0010ffff",
               "surrogates:\uDC80\uDCFF")
    # Byte samples that are not valid UTF-8.
    BYTES_STRINGS = (b"blatin1:\xa7\xe9", b"b255:\xff")
    SURROGATES = "\uDC80\uDCFF"
    def encode(self, text, errors="strict"):
        # Exercise _Py_EncodeUTF8Ex() via the C test helper.
        return _testcapi.EncodeLocaleEx(text, 0, errors)
    def check_encode_strings(self, errors):
        # Each sample must either match str.encode() or fail the same way
        # (the C helper reports failures as RuntimeError).
        for text in self.STRINGS:
            with self.subTest(text=text):
                try:
                    expected = text.encode(self.ENCODING, errors)
                except UnicodeEncodeError:
                    with self.assertRaises(RuntimeError) as cm:
                        self.encode(text, errors)
                    errmsg = str(cm.exception)
                    self.assertRegex(errmsg, r"encode error: pos=[0-9]+, reason=")
                else:
                    encoded = self.encode(text, errors)
                    self.assertEqual(encoded, expected)
    def test_encode_strict(self):
        self.check_encode_strings("strict")
    def test_encode_surrogateescape(self):
        self.check_encode_strings("surrogateescape")
    def test_encode_surrogatepass(self):
        # Skip when the locale encoder does not implement surrogatepass.
        try:
            self.encode('', 'surrogatepass')
        except ValueError as exc:
            if str(exc) == 'unsupported error handler':
                self.skipTest(f"{self.ENCODING!r} encoder doesn't support "
                              f"surrogatepass error handler")
            else:
                raise
        self.check_encode_strings("surrogatepass")
    def test_encode_unsupported_error_handler(self):
        with self.assertRaises(ValueError) as cm:
            self.encode('', 'backslashreplace')
        self.assertEqual(str(cm.exception), 'unsupported error handler')
    def decode(self, encoded, errors="strict"):
        # Exercise _Py_DecodeUTF8Ex() via the C test helper.
        return _testcapi.DecodeLocaleEx(encoded, 0, errors)
    def check_decode_strings(self, errors):
        # Build the byte samples: the fixed invalid sequences plus the
        # encodings of STRINGS (with surrogateescape on UTF-8 so lone
        # surrogates round-trip; surrogatepass adds a distinct variant).
        is_utf8 = (self.ENCODING == "utf-8")
        if is_utf8:
            encode_errors = 'surrogateescape'
        else:
            encode_errors = 'strict'
        strings = list(self.BYTES_STRINGS)
        for text in self.STRINGS:
            try:
                encoded = text.encode(self.ENCODING, encode_errors)
                if encoded not in strings:
                    strings.append(encoded)
            except UnicodeEncodeError:
                encoded = None
            if is_utf8:
                encoded2 = text.encode(self.ENCODING, 'surrogatepass')
                if encoded2 != encoded:
                    strings.append(encoded2)
        # Each sample must either match bytes.decode() or fail the same
        # way (the C helper reports failures as RuntimeError).
        for encoded in strings:
            with self.subTest(encoded=encoded):
                try:
                    expected = encoded.decode(self.ENCODING, errors)
                except UnicodeDecodeError:
                    with self.assertRaises(RuntimeError) as cm:
                        self.decode(encoded, errors)
                    errmsg = str(cm.exception)
                    self.assertTrue(errmsg.startswith("decode error: "), errmsg)
                else:
                    decoded = self.decode(encoded, errors)
                    self.assertEqual(decoded, expected)
    def test_decode_strict(self):
        self.check_decode_strings("strict")
    def test_decode_surrogateescape(self):
        self.check_decode_strings("surrogateescape")
    def test_decode_surrogatepass(self):
        # Skip when the locale decoder does not implement surrogatepass.
        try:
            self.decode(b'', 'surrogatepass')
        except ValueError as exc:
            if str(exc) == 'unsupported error handler':
                self.skipTest(f"{self.ENCODING!r} decoder doesn't support "
                              f"surrogatepass error handler")
            else:
                raise
        self.check_decode_strings("surrogatepass")
    def test_decode_unsupported_error_handler(self):
        with self.assertRaises(ValueError) as cm:
            self.decode(b'', 'backslashreplace')
        self.assertEqual(str(cm.exception), 'unsupported error handler')
class Rot13Test(unittest.TestCase):
    """Test the educational ROT-13 codec."""
    def test_encode(self):
        self.assertEqual(codecs.encode("Caesar liked ciphers", 'rot-13'),
                         'Pnrfne yvxrq pvcuref')
    def test_decode(self):
        self.assertEqual(codecs.decode('Rg gh, Oehgr?', 'rot-13'),
                         'Et tu, Brute?')
    def test_incremental_encode(self):
        incremental = codecs.getincrementalencoder('rot-13')()
        self.assertEqual(incremental.encode('ABBA nag Cheryl Baker'),
                         'NOON ant Purely Onxre')
    def test_incremental_decode(self):
        incremental = codecs.getincrementaldecoder('rot-13')()
        self.assertEqual(incremental.decode('terra Ares envy tha'),
                         'green Nerf rail gun')
class Rot13UtilTest(unittest.TestCase):
    """Test the ROT-13 codec via rot13 function,
    i.e. the user has done something like:
    $ echo "Hello World" | python -m encodings.rot_13
    """
    def test_rot13_func(self):
        source = io.StringIO('Gb or, be abg gb or, gung vf gur dhrfgvba')
        sink = io.StringIO()
        encodings.rot_13.rot13(source, sink)
        # getvalue() reads everything written, regardless of position.
        self.assertEqual(sink.getvalue(),
                         'To be, or not to be, that is the question')
class CodecNameNormalizationTest(unittest.TestCase):
"""Test codec name normalization"""
def test_codecs_lookup(self):
FOUND = (1, 2, 3, 4)
NOT_FOUND = (None, None, None, None)
def search_function(encoding):
if encoding == "aaa_8":
return FOUND
else:
return NOT_FOUND
codecs.register(search_function)
self.addCleanup(codecs.unregister, search_function)
self.assertEqual(FOUND, codecs.lookup('aaa_8'))
self.assertEqual(FOUND, codecs.lookup('AAA-8'))
self.assertEqual(FOUND, codecs.lookup('AAA---8'))
self.assertEqual(FOUND, codecs.lookup('AAA 8'))
self.assertEqual(FOUND, codecs.lookup('aaa\xe9\u20ac-8'))
self.assertEqual(NOT_FOUND, codecs.lookup('AAA.8'))
self.assertEqual(NOT_FOUND, codecs.lookup('AAA...8'))
self.assertEqual(NOT_FOUND, codecs.lookup('BBB-8'))
self.assertEqual(NOT_FOUND, codecs.lookup('BBB.8'))
self.assertEqual(NOT_FOUND, codecs.lookup('a\xe9\u20ac-8'))
def test_encodings_normalize_encoding(self):
# encodings.normalize_encoding() ignores non-ASCII characters.
normalize = encodings.normalize_encoding
self.assertEqual(normalize('utf_8'), 'utf_8')
self.assertEqual(normalize('utf\xE9\u20AC\U0010ffff-8'), 'utf_8')
self.assertEqual(normalize('utf 8'), 'utf_8')
# encodings.normalize_encoding() doesn't convert
# characters to lower case.
self.assertEqual(normalize('UTF 8'), 'UTF_8')
self.assertEqual(normalize('utf.8'), 'utf.8')
self.assertEqual(normalize('utf...8'), 'utf...8')
if __name__ == "__main__":
    # Run the full codecs test suite when executed as a script.
    unittest.main()
| 38.649645 | 113 | 0.543188 | import codecs
import contextlib
import io
import locale
import sys
import unittest
import encodings
from unittest import mock
from test import support
from test.support import os_helper
from test.support import warnings_helper
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import ctypes
except ImportError:
ctypes = None
SIZEOF_WCHAR_T = -1
else:
SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
def is_code_page_present(cp):
from ctypes import POINTER, WINFUNCTYPE, WinDLL
from ctypes.wintypes import BOOL, UINT, BYTE, WCHAR, UINT, DWORD
MAX_LEADBYTES = 12
MAX_DEFAULTCHAR = 2
MAX_PATH = 260
class CPINFOEXW(ctypes.Structure):
_fields_ = [("MaxCharSize", UINT),
("DefaultChar", BYTE*MAX_DEFAULTCHAR),
("LeadByte", BYTE*MAX_LEADBYTES),
("UnicodeDefaultChar", WCHAR),
("CodePage", UINT),
("CodePageName", WCHAR*MAX_PATH)]
prototype = WINFUNCTYPE(BOOL, UINT, DWORD, POINTER(CPINFOEXW))
GetCPInfoEx = prototype(("GetCPInfoExW", WinDLL("kernel32")))
info = CPINFOEXW()
return GetCPInfoEx(cp, 0, info)
class Queue(object):
def __init__(self, buffer):
self._buffer = buffer
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = self._buffer[:0]
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
class MixInCheckStateHandling:
def check_state_handling_decode(self, encoding, u, s):
for i in range(len(s)+1):
d = codecs.getincrementaldecoder(encoding)()
part1 = d.decode(s[:i])
state = d.getstate()
self.assertIsInstance(state[1], int)
if not state[1]:
d.setstate((state[0][:0], 0))
self.assertTrue(not d.decode(state[0]))
self.assertEqual(state, d.getstate())
d = codecs.getincrementaldecoder(encoding)()
d.setstate(state)
part2 = d.decode(s[i:], True)
self.assertEqual(u, part1+part2)
def check_state_handling_encode(self, encoding, u, s):
for i in range(len(u)+1):
d = codecs.getincrementalencoder(encoding)()
part1 = d.encode(u[:i])
state = d.getstate()
d = codecs.getincrementalencoder(encoding)()
d.setstate(state)
part2 = d.encode(u[i:], True)
self.assertEqual(s, part1+part2)
class ReadTest(MixInCheckStateHandling):
def check_partial(self, input, partialresults):
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
# do the check again, this time using an incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults, strict=True):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
lineends = ("\n", "\r\n", "\r", "\u2028")
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(lineends):
vw.append((i*200+200)*"\u3042" + lineend)
vwo.append((i*200+200)*"\u3042")
self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in lineends:
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
self.assertEqual(
reader.readline(keepends=True),
"xxx\n",
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
self.assertEqual(
reader.readline(keepends=False),
"xxx",
)
def test_mixed_readline_and_read(self):
lines = ["Humpty Dumpty sat on a wall,\n",
"Humpty Dumpty had a great fall.\r\n",
"All the king's horses and all the king's men\r",
"Couldn't put Humpty together again."]
data = ''.join(lines)
def getreader():
stream = io.BytesIO(data.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
ertEqual(f.readline(), lines[0])
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
tEqual(f.readline(), lines[0])
self.assertEqual(f.read(1), lines[1][0])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[len(lines[0]) + 1:][:100])
al(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
sertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(1), data[5])
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(100), data[6:106])
Equal(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse()
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
'
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
'
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
'
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
'
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
'
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = io.BytesIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
    def test_readlinequeue(self):
        """Interleave writes and readline() calls through a Queue-backed
        stream to exercise incremental line splitting.

        The ordering is significant: after writing "foo\\r" the reader must
        return "foo" even though a following "\\n" could still arrive and
        merge into "\\r\\n"; the next write then shows how the pending "\\r"
        is resolved.  Both keepends modes are checked.
        """
        q = Queue(b"")
        writer = codecs.getwriter(self.encoding)(q)
        reader = codecs.getreader(self.encoding)(q)
        # No lineends
        writer.write("foo\r")
        self.assertEqual(reader.readline(keepends=False), "foo")
        writer.write("\nbar\r")
        # The "\n" completes the previous "\r" -> an empty logical line.
        self.assertEqual(reader.readline(keepends=False), "")
        self.assertEqual(reader.readline(keepends=False), "bar")
        writer.write("baz")
        self.assertEqual(reader.readline(keepends=False), "baz")
        self.assertEqual(reader.readline(keepends=False), "")
        # Lineends
        writer.write("foo\r")
        self.assertEqual(reader.readline(keepends=True), "foo\r")
        writer.write("\nbar\r")
        self.assertEqual(reader.readline(keepends=True), "\n")
        self.assertEqual(reader.readline(keepends=True), "bar\r")
        writer.write("baz")
        self.assertEqual(reader.readline(keepends=True), "baz")
        self.assertEqual(reader.readline(keepends=True), "")
        writer.write("foo\r\n")
        self.assertEqual(reader.readline(keepends=True), "foo\r\n")
def test_bug1098990_a(self):
s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = "next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), "")
def test_bug1098990_b(self):
s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = "stillokay:bbbbxx\r\n"
s4 = "broken!!!!badbad\r\n"
s5 = "againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), "")
ill_formed_sequence_replace = "\ufffd"
    def test_lone_surrogates(self):
        """Lone surrogates must not encode under "strict"; check the
        substitution produced by every error handler, for a single
        surrogate and for consecutive surrogates, then round-trip an
        ill-formed sequence through the decode error handlers."""
        self.assertRaises(UnicodeEncodeError, "\ud800".encode, self.encoding)
        self.assertEqual("[\uDC80]".encode(self.encoding, "backslashreplace"),
                         "[\\udc80]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "namereplace"),
                         "[\\udc80]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "xmlcharrefreplace"),
                         "[&#56448;]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "ignore"),
                         "[]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "replace"),
                         "[?]".encode(self.encoding))
        # sequential surrogate characters
        self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "ignore"),
                         "[]".encode(self.encoding))
        self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "replace"),
                         "[??]".encode(self.encoding))
        # Encoding "" isolates the codec's BOM/signature so the
        # per-character byte sequences below can be taken without it.
        bom = "".encode(self.encoding)
        for before, after in [("\U00010fff", "A"), ("[", "]"),
                              ("A", "\U00010fff")]:
            before_sequence = before.encode(self.encoding)[len(bom):]
            after_sequence = after.encode(self.encoding)[len(bom):]
            test_string = before + "\uDC80" + after
            test_sequence = (bom + before_sequence +
                             self.ill_formed_sequence + after_sequence)
            # Strict decoding must reject the ill-formed sequence...
            self.assertRaises(UnicodeDecodeError, test_sequence.decode,
                              self.encoding)
            # ...but "surrogatepass" round-trips it in both directions.
            self.assertEqual(test_string.encode(self.encoding,
                                                "surrogatepass"),
                             test_sequence)
            self.assertEqual(test_sequence.decode(self.encoding,
                                                  "surrogatepass"),
                             test_string)
            self.assertEqual(test_sequence.decode(self.encoding, "ignore"),
                             before + after)
            self.assertEqual(test_sequence.decode(self.encoding, "replace"),
                             before + self.ill_formed_sequence_replace + after)
            # "backslashreplace" escapes each bad byte individually.
            backslashreplace = ''.join('\\x%02x' % b
                                       for b in self.ill_formed_sequence)
            self.assertEqual(test_sequence.decode(self.encoding, "backslashreplace"),
                             before + backslashreplace + after)
def test_incremental_surrogatepass(self):
# Test incremental decoder for surrogatepass handler:
# see issue #24214
# High surrogate
data = '\uD901'.encode(self.encoding, 'surrogatepass')
for i in range(1, len(data)):
dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
self.assertEqual(dec.decode(data[:i]), '')
self.assertEqual(dec.decode(data[i:], True), '\uD901')
# Low surrogate
data = '\uDC02'.encode(self.encoding, 'surrogatepass')
for i in range(1, len(data)):
dec = codecs.getincrementaldecoder(self.encoding)('surrogatepass')
self.assertEqual(dec.decode(data[:i]), '')
self.assertEqual(dec.decode(data[i:]), '\uDC02')
class UTF32Test(ReadTest, unittest.TestCase):
    """Tests for the byte-order-detecting "utf-32" codec (BOM written by
    the encoder, byte order inferred from the BOM by the decoder)."""
    encoding = "utf-32"
    # One ill-formed 4-byte unit (a lone low surrogate) in native order.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc\x00\x00"
    else:
        ill_formed_sequence = b"\x00\x00\xdc\x80"

    # "spamspam" with exactly one leading BOM, little- and big-endian.
    spamle = (b'\xff\xfe\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = (b'\x00\x00\xfe\xff'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')

    def test_only_one_bom(self):
        """The StreamWriter must emit the BOM on the first write only."""
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        """An invalid BOM (all 0xFF) must raise, not decode garbage."""
        s = io.BytesIO(4*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = io.BytesIO(8*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        """Feed the encoded form byte-by-byte: output appears only when a
        complete 4-byte unit (after the 4-byte BOM) has been read."""
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read
                "", # third byte of BOM read
                "", # fourth byte of BOM read => byteorder known
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        """'replace' and 'ignore' apply to a trailing partial unit at EOF."""
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_32_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_32_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        """A truncated input under "strict" raises UnicodeDecodeError."""
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        """Decoder getstate()/setstate() round-trips for both byte orders."""
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest, unittest.TestCase):
    """Tests for the fixed-order "utf-32-le" codec (no BOM handling)."""
    encoding = "utf-32-le"
    # A lone low surrogate encoded little-endian: ill-formed in UTF-32.
    ill_formed_sequence = b"\x80\xdc\x00\x00"

    def test_partial(self):
        """Byte-at-a-time decoding: output only on 4-byte boundaries."""
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        """A non-BMP code point encodes to its little-endian 4 bytes."""
        self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest, unittest.TestCase):
    """Tests for the fixed-order "utf-32-be" codec (no BOM handling)."""
    encoding = "utf-32-be"
    # A lone low surrogate encoded big-endian: ill-formed in UTF-32.
    ill_formed_sequence = b"\x00\x00\xdc\x80"

    def test_partial(self):
        """Byte-at-a-time decoding: output only on 4-byte boundaries."""
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        """A non-BMP code point encodes to its big-endian 4 bytes."""
        self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest, unittest.TestCase):
    """Tests for the byte-order-detecting "utf-16" codec."""
    encoding = "utf-16"
    # One ill-formed 2-byte unit (a lone low surrogate) in native order.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc"
    else:
        ill_formed_sequence = b"\xdc\x80"

    # "spamspam" with exactly one leading BOM, little- and big-endian.
    spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'

    def test_only_one_bom(self):
        """The StreamWriter must emit the BOM on the first write only."""
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        """An invalid BOM (0xFFFF) must raise, not decode garbage."""
        s = io.BytesIO(b"\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)
        s = io.BytesIO(b"\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        """Feed the encoded form byte-by-byte: output appears only when a
        complete 2-byte unit (or a full surrogate pair) has been read."""
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read => byteorder known
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        """'replace' and 'ignore' apply to a trailing partial unit at EOF."""
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_16_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_16_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        """A truncated input under "strict" raises UnicodeDecodeError."""
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        """Decoder getstate()/setstate() round-trips for both byte orders."""
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = 'Hello\r\nworld\r\n'
        s = s1.encode(self.encoding)
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        with open(os_helper.TESTFN, 'wb') as fp:
            fp.write(s)
        # codecs.open() in 'U' mode is deprecated; the warning is expected.
        with warnings_helper.check_warnings(('', DeprecationWarning)):
            reader = codecs.open(os_helper.TESTFN, 'U', encoding=self.encoding)
        with reader:
            self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest, unittest.TestCase):
    """Tests for the fixed-order "utf-16-le" codec (no BOM handling)."""
    encoding = "utf-16-le"
    # A lone low surrogate, little-endian: ill-formed in UTF-16.
    ill_formed_sequence = b"\x80\xdc"

    def test_partial(self):
        """Byte-at-a-time decoding: output only when a full 2-byte unit
        (or a complete surrogate pair) is available."""
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        """Malformed inputs: strict must raise; "replace" substitutes one
        U+FFFD per unrecoverable unit (truncated unit or lone surrogate)."""
        tests = [
            (b'\xff', '\ufffd'),
            (b'A\x00Z', 'A\ufffd'),
            (b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
            (b'\x00\xd8', '\ufffd'),
            (b'\x00\xd8A', '\ufffd'),
            (b'\x00\xd8A\x00', '\ufffdA'),
            (b'\x00\xdcA\x00', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16le', 'replace'), expected)

    def test_nonbmp(self):
        """Non-BMP code points round-trip as surrogate pairs."""
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\x00\xd8\x03\xde')
        self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
                         "\U00010203")
class UTF16BETest(ReadTest, unittest.TestCase):
    """Tests for the fixed-order "utf-16-be" codec (no BOM handling)."""
    encoding = "utf-16-be"
    # A lone low surrogate, big-endian: ill-formed in UTF-16.
    ill_formed_sequence = b"\xdc\x80"

    def test_partial(self):
        """Byte-at-a-time decoding: output only when a full 2-byte unit
        (or a complete surrogate pair) is available."""
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        """Malformed inputs: strict must raise; "replace" substitutes one
        U+FFFD per unrecoverable unit (truncated unit or lone surrogate)."""
        tests = [
            (b'\xff', '\ufffd'),
            (b'\x00A\xff', 'A\ufffd'),
            (b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
            (b'\xd8\x00', '\ufffd'),
            (b'\xd8\x00\xdc', '\ufffd'),
            (b'\xd8\x00\x00A', '\ufffdA'),
            (b'\xdc\x00\x00A', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16be', 'replace'), expected)

    def test_nonbmp(self):
        """Non-BMP code points round-trip as surrogate pairs."""
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\xd8\x00\xde\x03')
        self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
                         "\U00010203")
class UTF8Test(ReadTest, unittest.TestCase):
    """Tests for the "utf-8" codec: incremental decoding, surrogate error
    handlers, and malformed-sequence handling."""
    encoding = "utf-8"
    # A lone low surrogate in CESU-style encoding: ill-formed in UTF-8.
    ill_formed_sequence = b"\xed\xb2\x80"
    # "replace" yields one U+FFFD per bad byte -> three for the 3 bytes.
    ill_formed_sequence_replace = "\ufffd" * 3
    # Plain UTF-8 has no signature (UTF8SigTest overrides this).
    BOM = b''

    def test_partial(self):
        """Byte-at-a-time decoding: output appears only when a complete
        multi-byte sequence has been read."""
        self.check_partial(
            "\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_decoder_state(self):
        """Decoder getstate()/setstate() round-trips over a mixed string."""
        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
        self.check_state_handling_decode(self.encoding,
                                         u, u.encode(self.encoding))

    def test_decode_error(self):
        """Each decode error handler's substitution for stray 8-bit bytes."""
        for data, error_handler, expected in (
            (b'[\x80\xff]', 'ignore', '[]'),
            (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
            (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
            (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.decode(self.encoding, error_handler),
                                 expected)

    def test_lone_surrogates(self):
        """UTF-8-specific additions to the shared lone-surrogate checks."""
        super().test_lone_surrogates()
        # not sure if this is making sense for
        # UTF-16 and UTF-32
        self.assertEqual("[\uDC80]".encode(self.encoding, "surrogateescape"),
                         self.BOM + b'[\x80]')
        with self.assertRaises(UnicodeEncodeError) as cm:
            "[\uDC80\uD800\uDFFF]".encode(self.encoding, "surrogateescape")
        exc = cm.exception
        # The error must cover the whole unencodable surrogate run.
        self.assertEqual(exc.object[exc.start:exc.end], '\uD800\uDFFF')

    def test_surrogatepass_handler(self):
        """'surrogatepass' must encode and decode lone surrogates, but
        still reject truncated or otherwise malformed surrogate bytes."""
        self.assertEqual("abc\ud800def".encode(self.encoding, "surrogatepass"),
                         self.BOM + b"abc\xed\xa0\x80def")
        self.assertEqual("\U00010fff\uD800".encode(self.encoding, "surrogatepass"),
                         self.BOM + b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual("[\uD800\uDC80]".encode(self.encoding, "surrogatepass"),
                         self.BOM + b'[\xed\xa0\x80\xed\xb2\x80]')
        self.assertEqual(b"abc\xed\xa0\x80def".decode(self.encoding, "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode(self.encoding, "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0".decode(self.encoding, "surrogatepass")
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0z".decode(self.encoding, "surrogatepass")

    def test_incremental_errors(self):
        # Test that the incremental decoder can fail with final=False.
        # See issue #24214
        cases = [b'\x80', b'\xBF', b'\xC0', b'\xC1', b'\xF5', b'\xF6', b'\xFF']
        for prefix in (b'\xC2', b'\xDF', b'\xE0', b'\xE0\xA0', b'\xEF',
                       b'\xEF\xBF', b'\xF0', b'\xF0\x90', b'\xF0\x90\x80',
                       b'\xF4', b'\xF4\x8F', b'\xF4\x8F\xBF'):
            for suffix in b'\x7F', b'\xC0':
                cases.append(prefix + suffix)
        cases.extend((b'\xE0\x80', b'\xE0\x9F', b'\xED\xA0\x80',
                      b'\xED\xBF\xBF', b'\xF0\x80', b'\xF0\x8F', b'\xF4\x90'))
        for data in cases:
            with self.subTest(data=data):
                dec = codecs.getincrementaldecoder(self.encoding)()
                self.assertRaises(UnicodeDecodeError, dec.decode, data)
class UTF7Test(ReadTest, unittest.TestCase):
    """Tests for the "utf-7" codec (RFC 2152): directly-encoded character
    sets, the '+'...'-' base64 shift sequences, and error recovery."""
    encoding = "utf-7"

    def test_ascii(self):
        # Set D (directly encoded characters)
        set_d = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                 'abcdefghijklmnopqrstuvwxyz'
                 '0123456789'
                 '\'(),-./:?')
        self.assertEqual(set_d.encode(self.encoding), set_d.encode('ascii'))
        self.assertEqual(set_d.encode('ascii').decode(self.encoding), set_d)
        # Set O (optional direct characters) -- also passed through as-is.
        set_o = ' !"#$%&*;<=>@[]^_`{|}'
        self.assertEqual(set_o.encode(self.encoding), set_o.encode('ascii'))
        self.assertEqual(set_o.encode('ascii').decode(self.encoding), set_o)
        # + -- the shift character itself escapes to "+-".
        self.assertEqual('a+b'.encode(self.encoding), b'a+-b')
        self.assertEqual(b'a+-b'.decode(self.encoding), 'a+b')
        # White spaces
        ws = ' \t\n\r'
        self.assertEqual(ws.encode(self.encoding), ws.encode('ascii'))
        self.assertEqual(ws.encode('ascii').decode(self.encoding), ws)
        # Other ASCII characters -- must go through a base64 shift block.
        other_ascii = ''.join(sorted(set(bytes(range(0x80)).decode()) -
                                     set(set_d + set_o + '+' + ws)))
        self.assertEqual(other_ascii.encode(self.encoding),
                         b'+AAAAAQACAAMABAAFAAYABwAIAAsADAAOAA8AEAARABIAEwAU'
                         b'ABUAFgAXABgAGQAaABsAHAAdAB4AHwBcAH4Afw-')

    def test_partial(self):
        """Byte-at-a-time decoding; output from a base64 shift block only
        appears once enough base64 digits for a full code unit arrive."""
        self.check_partial(
            'a+-b\x00c\x80d\u0100e\U00010000f',
            [
                'a',
                'a',
                'a+',
                'a+-',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b\x00',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c\x80',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d\u0100',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e\U00010000',
                'a+-b\x00c\x80d\u0100e\U00010000f',
            ]
        )

    def test_errors(self):
        """Malformed shift sequences: strict must raise; "replace" yields
        U+FFFD for the unrecoverable part of the sequence."""
        tests = [
            (b'\xffb', '\ufffdb'),
            (b'a\xffb', 'a\ufffdb'),
            (b'a\xff\xffb', 'a\ufffd\ufffdb'),
            (b'a+IK', 'a\ufffd'),
            (b'a+IK-b', 'a\ufffdb'),
            (b'a+IK,b', 'a\ufffdb'),
            (b'a+IKx', 'a\u20ac\ufffd'),
            (b'a+IKx-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr', 'a\u20ac\ufffd'),
            (b'a+IKwgr-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr,', 'a\u20ac\ufffd'),
            (b'a+IKwgr,-b', 'a\u20ac\ufffd-b'),
            (b'a+IKwgrB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrB-b', 'a\u20ac\u20ac\ufffdb'),
            (b'a+/,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+//,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+IKw-b\xff', 'a\u20acb\ufffd'),
            (b'a+IKw\xffb', 'a\u20ac\ufffdb'),
            (b'a+@b', 'a\ufffdb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
                                  raw, 'strict', True)
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)

    def test_nonbmp(self):
        """Non-BMP code points travel as surrogate pairs inside the base64
        block; a terminating '-' is optional at end of input."""
        self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0')
        self.assertEqual(b'+2AHcoA'.decode(self.encoding), '\U000104A0')
        self.assertEqual('\u20ac\U000104A0'.encode(self.encoding), b'+IKzYAdyg-')
        self.assertEqual(b'+IKzYAdyg-'.decode(self.encoding), '\u20ac\U000104A0')
        self.assertEqual(b'+IKzYAdyg'.decode(self.encoding), '\u20ac\U000104A0')
        self.assertEqual('\u20ac\u20ac\U000104A0'.encode(self.encoding),
                         b'+IKwgrNgB3KA-')
        self.assertEqual(b'+IKwgrNgB3KA-'.decode(self.encoding),
                         '\u20ac\u20ac\U000104A0')
        self.assertEqual(b'+IKwgrNgB3KA'.decode(self.encoding),
                         '\u20ac\u20ac\U000104A0')

    def test_lone_surrogates(self):
        """Lone surrogates in the base64 block decode as-is when complete;
        truncated or corrupted blocks become U+FFFD under "replace"."""
        tests = [
            (b'a+2AE-b', 'a\ud801b'),
            (b'a+2AE\xffb', 'a\ufffdb'),
            (b'a+2AE', 'a\ufffd'),
            (b'a+2AEA-b', 'a\ufffdb'),
            (b'a+2AH-b', 'a\ufffdb'),
            (b'a+IKzYAQ-b', 'a\u20ac\ud801b'),
            (b'a+IKzYAQ\xffb', 'a\u20ac\ufffdb'),
            (b'a+IKzYAQA-b', 'a\u20ac\ufffdb'),
            (b'a+IKzYAd-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgrNgB-b', 'a\u20ac\u20ac\ud801b'),
            (b'a+IKwgrNgB\xffb', 'a\u20ac\u20ac\ufffdb'),
            (b'a+IKwgrNgB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrNgBA-b', 'a\u20ac\u20ac\ufffdb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)
class UTF16ExTest(unittest.TestCase):
    """Tests for the codecs.utf_16_ex_decode() helper."""

    def test_errors(self):
        """A truncated input raises UnicodeDecodeError under "strict"."""
        with self.assertRaises(UnicodeDecodeError):
            codecs.utf_16_ex_decode(b"\xff", "strict", 0, True)

    def test_bad_args(self):
        """Calling without the mandatory argument is a TypeError."""
        with self.assertRaises(TypeError):
            codecs.utf_16_ex_decode()
class ReadBufferTest(unittest.TestCase):
    """Tests for codecs.readbuffer_encode(), which copies any object
    supporting the buffer protocol (or a str) into bytes."""

    def test_array(self):
        """Buffer-protocol objects such as array.array are accepted."""
        import array
        buf = array.array("b", b"spam")
        self.assertEqual(codecs.readbuffer_encode(buf), (b"spam", 4))

    def test_empty(self):
        """An empty string encodes to empty bytes with length 0."""
        self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))

    def test_bad_args(self):
        """Missing or non-buffer arguments raise TypeError."""
        self.assertRaises(TypeError, codecs.readbuffer_encode)
        self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class UTF8SigTest(UTF8Test, unittest.TestCase):
    """Tests for "utf-8-sig": UTF-8 with a leading signature (BOM) that is
    written by the encoder and skipped once by the decoder."""
    encoding = "utf-8-sig"
    BOM = codecs.BOM_UTF8

    def test_partial(self):
        """Byte-at-a-time decoding: the first (signature) BOM is consumed
        silently; a second, genuine U+FEFF is passed through."""
        self.check_partial(
            "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "",
                "",
                "", # First BOM has been read and skipped
                "",
                "",
                "\ufeff", # Second BOM has been read and emitted
                "\ufeff\x00", # "\x00" read and emitted
                "\ufeff\x00", # First byte of encoded "\xff" read
                "\ufeff\x00\xff", # Second byte of encoded "\xff" read
                "\ufeff\x00\xff", # First byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")

    def test_bom(self):
        """The incremental decoder strips the signature transparently."""
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = "spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)

    def test_stream_bom(self):
        """StreamReader must skip the signature for every read chunk size."""
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)

    def test_stream_bare(self):
        """Input without a signature decodes the same way (nothing skipped)."""
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"
        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()
                if not data:
                    break
                ostream.write(data)
            got = ostream.getvalue()
            self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
    """Tests for codecs.escape_decode(), the bytes "string-escape"
    decoder (backslash escapes in byte strings)."""

    def test_empty(self):
        self.assertEqual(codecs.escape_decode(b""), (b"", 0))
        self.assertEqual(codecs.escape_decode(bytearray()), (b"", 0))

    def test_raw(self):
        """Every byte other than backslash passes through unchanged."""
        decode = codecs.escape_decode
        for b in range(256):
            b = bytes([b])
            if b != b'\\':
                self.assertEqual(decode(b + b'0'), (b + b'0', 2))

    def test_escape(self):
        """All recognized escapes (line continuation, quotes, control
        characters, octal up to \\777, hex); unrecognized escapes pass
        through unchanged but emit a DeprecationWarning."""
        decode = codecs.escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", b"[]")
        check(br'[\"]', b'["]')
        check(br"[\']", b"[']")
        check(br"[\\]", b"[\\]")
        check(br"[\a]", b"[\x07]")
        check(br"[\b]", b"[\x08]")
        check(br"[\t]", b"[\x09]")
        check(br"[\n]", b"[\x0a]")
        check(br"[\v]", b"[\x0b]")
        check(br"[\f]", b"[\x0c]")
        check(br"[\r]", b"[\x0d]")
        check(br"[\7]", b"[\x07]")
        check(br"[\78]", b"[\x078]")
        check(br"[\41]", b"[!]")
        check(br"[\418]", b"[!8]")
        check(br"[\101]", b"[A]")
        check(br"[\1010]", b"[A0]")
        # Octal escapes wrap modulo 256: \501 == 0o501 & 0xff == ord("A").
        check(br"[\501]", b"[A]")
        check(br"[\x41]", b"[A]")
        check(br"[\x410]", b"[A0]")
        for i in range(97, 123):
            b = bytes([i])
            if b not in b'abfnrtvx':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b, b"\\" + b)
            with self.assertWarns(DeprecationWarning):
                check(b"\\" + b.upper(), b"\\" + b.upper())
        with self.assertWarns(DeprecationWarning):
            check(br"\8", b"\\8")
        with self.assertWarns(DeprecationWarning):
            check(br"\9", b"\\9")
        with self.assertWarns(DeprecationWarning):
            check(b"\\\xfa", b"\\\xfa")

    def test_errors(self):
        """Truncated hex escapes: strict raises; "ignore" drops the bad
        escape; "replace" substitutes "?"."""
        decode = codecs.escape_decode
        self.assertRaises(ValueError, decode, br"\x")
        self.assertRaises(ValueError, decode, br"[\x]")
        self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
        self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
        self.assertRaises(ValueError, decode, br"\x0")
        self.assertRaises(ValueError, decode, br"[\x0]")
        self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
        self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
# From RFC 3492
# (unicode string, expected punycode bytes) pairs, taken verbatim from the
# RFC's sample strings; used by PunycodeTest below.
punycode_testcases = [
    # A Arabic (Egyptian):
    ("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     b"egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    ("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     b"ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    ("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     b"ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    ("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     "\u0065\u0073\u006B\u0079",
     b"Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    ("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     "\u05D1\u05E8\u05D9\u05EA",
     b"4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    ("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     "\u0939\u0948\u0902",
     b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    ("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    ("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     b"psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    ("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     "\u0438",
     b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    ("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     "\u0061\u00F1\u006F\u006C",
     b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    ("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     "\u0056\u0069\u1EC7\u0074",
     b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    ("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     b"3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    ("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     "\u004F\u004E\u004B\u0045\u0059\u0053",
     b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    ("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    ("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     b"2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    ("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     "\u308B\u0035\u79D2\u524D",
     b"MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    ("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     b"de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    ("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     b"d9juau41awczczp"),
    # (S) -> $1.00 <-
    ("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     "\u003C\u002D",
     b"-> $1.00 <--")
    ]
# Sanity check: every test vector must be a (unicode, punycode-bytes) pair;
# report any malformed entry so a bad edit to the table is noticed.
for case in punycode_testcases:
    if len(case) != 2:
        print(repr(case))
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(
str(uni.encode("punycode"), "ascii").lower(),
str(puny, "ascii").lower()
)
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
puny = puny.decode("ascii").encode("ascii")
self.assertEqual(uni, puny.decode("punycode"))
def test_decode_invalid(self):
testcases = [
(b"xn--w&", "strict", UnicodeError()),
(b"xn--w&", "ignore", "xn-"),
]
for puny, errors, expected in testcases:
with self.subTest(puny=puny, errors=errors):
if isinstance(expected, Exception):
self.assertRaises(UnicodeError, puny.decode, "punycode", errors)
else:
self.assertEqual(puny.decode("punycode", errors), expected)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# Nameprep (RFC 3491) test vectors as (input-UTF-8, expected-UTF-8) pairs;
# expected None means the input must be rejected, (None, None) marks a
# vector skipped on this build.  Used by NameprepTest below.
nameprep_tests = [
    # 3.1 Map to nothing.
    (b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     b'\xb8\x8f\xef\xbb\xbf',
     b'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    (b'CAFE',
     b'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    (b'\xc3\x9f',
     b'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    (b'\xc4\xb0',
     b'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    (b'\xc5\x83\xcd\xba',
     b'\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    (b'j\xcc\x8c\xc2\xa0\xc2\xaa',
     b'\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    (b'\xe1\xbe\xb7',
     b'\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    (b'\xc7\xb0',
     b'\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    (b'\xce\x90',
     b'\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    (b'\xce\xb0',
     b'\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    (b'\xe1\xba\x96',
     b'\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    (b'\xe1\xbd\x96',
     b'\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (b' ',
     b' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    (b'\xc2\xa0',
     b' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    (b'\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    (b'\xe2\x80\x80',
     b' '),
    # 3.18 Zero Width Space U+200b.
    (b'\xe2\x80\x8b',
     b''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    (b'\xe3\x80\x80',
     b' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    (b'\x10\x7f',
     b'\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    (b'\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    (b'\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    (b'\xef\xbb\xbf',
     b''),
    # 3.24 Non-ASCII control character U+1D175.
    (b'\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    (b'\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    (b'\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    (b'\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    (b'\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    (b'\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    (b'\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    (b'\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    (b'\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    (b'\xcd\x81',
     b'\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    (b'\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    (b'\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    (b'\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    (b'\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    (b'foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    (b'foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    (b'foo\xef\xb9\xb6bar',
     b'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    (b'\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    (b'\xd8\xa71\xd8\xa8',
     b'\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #(b'\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    (b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     b'\xaa\xce\xb0\xe2\x80\x80',
     b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    (b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     b'\x80',
     b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
    ]
class NameprepTest(unittest.TestCase):
    def test_nameprep(self):
        """Run the RFC 3454 (stringprep/nameprep) conformance vectors."""
        from encodings.idna import nameprep
        for index, (raw, expected) in enumerate(nameprep_tests):
            if raw is None:
                # Entry was disabled in the vector table above.
                continue
            # Vectors are stored as UTF-8; surrogatepass keeps lone
            # surrogates intact so the prohibited-input cases survive
            # decoding and actually reach nameprep().
            raw = str(raw, "utf-8", "surrogatepass")
            if expected is None:
                # nameprep must reject input containing prohibited characters.
                self.assertRaises(UnicodeError, nameprep, raw)
                continue
            expected = str(expected, "utf-8", "surrogatepass")
            try:
                self.assertEqual(nameprep(raw), expected)
            except Exception as exc:
                raise support.TestFailed("Test 3.%d: %s" % (index + 1, str(exc)))
class IDNACodecTest(unittest.TestCase):
    """Behaviour of the "idna" codec (RFC 3490 ToASCII/ToUnicode)."""

    def test_builtin_decode(self):
        # ASCII labels pass through; ACE ("xn--") labels are converted.
        for raw, expected in [
            (b"python.org", "python.org"),
            (b"python.org.", "python.org."),
            (b"xn--pythn-mua.org", "pyth\xf6n.org"),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
        ]:
            self.assertEqual(str(raw, "idna"), expected)

    def test_builtin_encode(self):
        for text, expected in [
            ("python.org", b"python.org"),
            ("python.org.", b"python.org."),
            ("pyth\xf6n.org", b"xn--pythn-mua.org"),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
        ]:
            self.assertEqual(text.encode("idna"), expected)

    def test_stream(self):
        reader = codecs.getreader("idna")(io.BytesIO(b"abc"))
        reader.read(3)
        self.assertEqual(reader.read(), "")

    def test_incremental_decode(self):
        for raw, expected in [
            (b"python.org", "python.org"),
            (b"python.org.", "python.org."),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
            (b"xn--pythn-mua.org.", "pyth\xf6n.org."),
        ]:
            decoded = "".join(
                codecs.iterdecode((bytes([b]) for b in raw), "idna"))
            self.assertEqual(decoded, expected)

        decoder = codecs.getincrementaldecoder("idna")()
        # A label is only emitted once its terminating dot has been seen.
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg"), "")
        self.assertEqual(decoder.decode(b"", True), "org")

        decoder.reset()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg."), "org.")
        self.assertEqual(decoder.decode(b"", True), "")

    def test_incremental_encode(self):
        for text, expected in [
            ("python.org", b"python.org"),
            ("python.org.", b"python.org."),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
            ("pyth\xf6n.org.", b"xn--pythn-mua.org."),
        ]:
            encoded = b"".join(codecs.iterencode(text, "idna"))
            self.assertEqual(encoded, expected)

        encoder = codecs.getincrementalencoder("idna")()
        # Nothing is emitted until a label is terminated by a dot.
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
        self.assertEqual(encoder.encode("", True), b"org")

        encoder.reset()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
        self.assertEqual(encoder.encode("", True), b"")

    def test_errors(self):
        # Only "strict" error handling is supported by the idna codec.
        "python.org".encode("idna", "strict")
        b"python.org".decode("idna", "strict")
        for errors in ("ignore", "replace", "backslashreplace",
                       "surrogateescape"):
            with self.assertRaises(Exception):
                "python.org".encode("idna", errors)
            with self.assertRaises(Exception):
                b"python.org".decode("idna", errors)
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.decode)
self.assertEqual(codecs.decode(b'abc'), 'abc')
self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')
# test keywords
self.assertEqual(codecs.decode(obj=b'\xe4\xf6\xfc', encoding='latin-1'),
'\xe4\xf6\xfc')
self.assertEqual(codecs.decode(b'[\xff]', 'ascii', errors='ignore'),
'[]')
def test_encode(self):
self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
b'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.encode)
self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
self.assertEqual(codecs.encode('abc'), b'abc')
self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
# test keywords
self.assertEqual(codecs.encode(obj='\xe4\xf6\xfc', encoding='latin-1'),
b'\xe4\xf6\xfc')
self.assertEqual(codecs.encode('[\xff]', 'ascii', errors='ignore'),
b'[]')
def test_register(self):
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, 42)
def test_unregister(self):
name = "nonexistent_codec_name"
search_function = mock.Mock()
codecs.register(search_function)
self.assertRaises(TypeError, codecs.lookup, name)
search_function.assert_called_with(name)
search_function.reset_mock()
codecs.unregister(search_function)
self.assertRaises(LookupError, codecs.lookup, name)
search_function.assert_not_called()
def test_lookup(self):
self.assertRaises(TypeError, codecs.lookup)
self.assertRaises(LookupError, codecs.lookup, "__spam__")
self.assertRaises(LookupError, codecs.lookup, " ")
def test_getencoder(self):
self.assertRaises(TypeError, codecs.getencoder)
self.assertRaises(LookupError, codecs.getencoder, "__spam__")
def test_getdecoder(self):
self.assertRaises(TypeError, codecs.getdecoder)
self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
def test_getreader(self):
self.assertRaises(TypeError, codecs.getreader)
self.assertRaises(LookupError, codecs.getreader, "__spam__")
def test_getwriter(self):
self.assertRaises(TypeError, codecs.getwriter)
self.assertRaises(LookupError, codecs.getwriter, "__spam__")
def test_lookup_issue1813(self):
# Issue #1813: under Turkish locales, lookup of some codecs failed
# because 'I' is lowercased as "ı" (dotless i)
oldlocale = locale.setlocale(locale.LC_CTYPE)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
try:
locale.setlocale(locale.LC_CTYPE, 'tr_TR')
except locale.Error:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
c = codecs.lookup('ASCII')
self.assertEqual(c.name, 'ascii')
def test_all(self):
api = (
"encode", "decode",
"register", "CodecInfo", "Codec", "IncrementalEncoder",
"IncrementalDecoder", "StreamReader", "StreamWriter", "lookup",
"getencoder", "getdecoder", "getincrementalencoder",
"getincrementaldecoder", "getreader", "getwriter",
"register_error", "lookup_error",
"strict_errors", "replace_errors", "ignore_errors",
"xmlcharrefreplace_errors", "backslashreplace_errors",
"namereplace_errors",
"open", "EncodedFile",
"iterencode", "iterdecode",
"BOM", "BOM_BE", "BOM_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_BE", "BOM_UTF16_LE",
"BOM_UTF32", "BOM_UTF32_BE", "BOM_UTF32_LE",
"BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", # Undocumented
"StreamReaderWriter", "StreamRecoder",
)
self.assertCountEqual(api, codecs.__all__)
for api in codecs.__all__:
getattr(codecs, api)
def test_open(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
for mode in ('w', 'r', 'r+', 'w+', 'a', 'a+'):
with self.subTest(mode), \
codecs.open(os_helper.TESTFN, mode, 'ascii') as file:
self.assertIsInstance(file, codecs.StreamReaderWriter)
def test_undefined(self):
self.assertRaises(UnicodeError, codecs.encode, 'abc', 'undefined')
self.assertRaises(UnicodeError, codecs.decode, b'abc', 'undefined')
self.assertRaises(UnicodeError, codecs.encode, '', 'undefined')
self.assertRaises(UnicodeError, codecs.decode, b'', 'undefined')
for errors in ('strict', 'ignore', 'replace', 'backslashreplace'):
self.assertRaises(UnicodeError,
codecs.encode, 'abc', 'undefined', errors)
self.assertRaises(UnicodeError,
codecs.decode, b'abc', 'undefined', errors)
def test_file_closes_if_lookup_error_raised(self):
mock_open = mock.mock_open()
with mock.patch('builtins.open', mock_open) as file:
with self.assertRaises(LookupError):
codecs.open(os_helper.TESTFN, 'wt', 'invalid-encoding')
file().close.assert_called()
class StreamReaderTest(unittest.TestCase):
    """StreamReader behaviour for a UTF-8 byte stream."""

    def setUp(self):
        # Two Hangul syllables encoded as UTF-8, separated by a newline.
        self.reader = codecs.getreader('utf-8')
        self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')

    def test_readlines(self):
        stream_reader = self.reader(self.stream)
        self.assertEqual(stream_reader.readlines(), ['\ud55c\n', '\uae00'])
class EncodedFileTest(unittest.TestCase):

    def test_basic(self):
        """EncodedFile transcodes between the file and data encodings."""
        # Reading: the stream holds UTF-8 bytes; the wrapper delivers
        # them re-encoded as UTF-16-LE.
        stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
        wrapped = codecs.EncodedFile(stream, 'utf-16-le', 'utf-8')
        self.assertEqual(wrapped.read(), b'\\\xd5\n\x00\x00\xae')

        # Writing: UTF-8 input lands on the stream re-encoded as Latin-1.
        stream = io.BytesIO()
        wrapped = codecs.EncodedFile(stream, 'utf-8', 'latin-1')
        wrapped.write(b'\xc3\xbc')
        self.assertEqual(stream.getvalue(), b'\xfc')
# Names of all Unicode codecs that must round-trip "abc123" through the
# stateless, stream and incremental APIs (exercised by BasicUnicodeTest).
all_unicode_encodings = [
    "ascii",
    "big5",
    "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1125",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    "cp932",
    "cp949",
    "cp950",
    "euc_jis_2004",
    "euc_jisx0213",
    "euc_jp",
    "euc_kr",
    "gb18030",
    "gb2312",
    "gbk",
    "hp_roman8",
    "hz",
    "idna",
    "iso2022_jp",
    "iso2022_jp_1",
    "iso2022_jp_2",
    "iso2022_jp_2004",
    "iso2022_jp_3",
    "iso2022_jp_ext",
    "iso2022_kr",
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    "johab",
    "koi8_r",
    "koi8_t",
    "koi8_u",
    "kz1048",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "shift_jis",
    "shift_jis_2004",
    "shift_jisx0213",
    "tis_620",
    "unicode_escape",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]

# Windows-only codecs exist only when the interpreter was built with them.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")
if hasattr(codecs, "oem_encode"):
    all_unicode_encodings.append("oem")

# The following encoding is not tested, because it's not supposed
# to work:
#    "undefined"

# The following encodings don't work in stateful mode
broken_unicode_with_stateful = [
    "punycode",
]
class BasicUnicodeTest(unittest.TestCase, MixInCheckStateHandling):
    """Round-trip every codec in all_unicode_encodings through the
    stateless, stream, incremental and iterator codec APIs."""

    def test_basics(self):
        s = "abc123"  # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            # The registered canonical name should match the lookup key
            # modulo "_"/"-" differences (and the "_codec" suffix).
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))

            # Stateless round trip: encode then decode must give s back.
            (b, size) = codecs.getencoder(encoding)(s)
            self.assertEqual(size, len(s), "encoding=%r" % encoding)
            (chars, size) = codecs.getdecoder(encoding)(b)
            self.assertEqual(chars, s, "encoding=%r" % encoding)

            if encoding not in broken_unicode_with_stateful:
                # check stream reader/writer
                # Queue is defined earlier in this file — used here as a
                # FIFO byte buffer (write then read drains what was written).
                q = Queue(b"")
                writer = codecs.getwriter(encoding)(q)
                encodedresult = b""
                for c in s:
                    # Feed one character at a time to exercise statefulness.
                    writer.write(c)
                    chunk = q.read()
                    self.assertTrue(type(chunk) is bytes, type(chunk))
                    encodedresult += chunk
                q = Queue(b"")
                reader = codecs.getreader(encoding)(q)
                decodedresult = ""
                for c in encodedresult:
                    # Feed one byte at a time.
                    q.write(bytes([c]))
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "encoding=%r" % encoding)

            if encoding not in broken_unicode_with_stateful:
                # check incremental decoder/encoder and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                except LookupError:  # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = b""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    # final=True flushes any pending state.
                    encodedresult += encoder.encode("", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += decoder.decode(bytes([c]))
                    decodedresult += decoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)

                    # check iterencode()/iterdecode()
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "encoding=%r" % encoding)

                    # check iterencode()/iterdecode() with empty string
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode("", encoding), encoding))
                    self.assertEqual(result, "")

                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        encoder = codecs.getincrementalencoder(encoding)("ignore")
                    except LookupError:  # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(encoder.encode(c) for c in s)
                        decoder = codecs.getincrementaldecoder(encoding)("ignore")
                        decodedresult = "".join(decoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)

    @support.cpython_only
    def test_basics_capi(self):
        """Same incremental round trip, but via the C-API test hooks."""
        s = "abc123"  # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            if encoding not in broken_unicode_with_stateful:
                # check incremental decoder/encoder (fetched via the C API)
                try:
                    cencoder = _testcapi.codec_incrementalencoder(encoding)
                except LookupError:  # no IncrementalEncoder
                    pass
                else:
                    # check C API
                    encodedresult = b""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode("", True)
                    cdecoder = _testcapi.codec_incrementaldecoder(encoding)
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(bytes([c]))
                    decodedresult += cdecoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)

                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
                    except LookupError:  # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(cencoder.encode(c) for c in s)
                        cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
                        decodedresult = "".join(cdecoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)

    def test_seek(self):
        """seek(0) on a StreamReader must reset codec state and buffers."""
        # all codecs should be able to encode these
        s = "%s\n%s\n" % (100*"abc123", 100*"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna":  # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_stateful:
                continue
            reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
            for t in range(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                data = reader.read()
                self.assertEqual(s, data)

    def test_bad_decode_args(self):
        """Decoders must reject missing (and for most codecs, non-buffer)
        arguments with TypeError."""
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            if encoding not in ("idna", "punycode"):
                self.assertRaises(TypeError, decoder, 42)

    def test_bad_encode_args(self):
        """Encoders must reject a missing argument with TypeError."""
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            self.assertRaises(TypeError, encoder)

    def test_encoding_map_type_initialized(self):
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)

    def test_decoder_state(self):
        # Check that getstate() and setstate() handle the state properly
        # (helpers come from MixInCheckStateHandling, defined earlier).
        u = "abc123"
        for encoding in all_unicode_encodings:
            if encoding not in broken_unicode_with_stateful:
                self.check_state_handling_decode(encoding, u, u.encode(encoding))
                self.check_state_handling_encode(encoding, u, u.encode(encoding))
class CharmapTest(unittest.TestCase):
    """codecs.charmap_decode() with the three supported mapping kinds:
    a str table, an int->str dict and an int->int dict."""

    def test_decode_with_string_map(self):
        # The mapping is a str indexed by byte value.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict", "abc"),
            ("abc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict", "\U0010FFFFbc"),
            ("\U0010FFFFbc", 3)
        )

        # A byte beyond the end of the table, or mapped to U+FFFE,
        # is an error under "strict".
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab"
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab\ufffe"
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab"),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab\ufffe"),
            ("ab\ufffd", 3)
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace", "ab"),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace", "ab\ufffe"),
            ("ab\\x02", 3)
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab"),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab\ufffe"),
            ("ab", 3)
        )

        # An empty table maps nothing; "ignore" drops every byte.
        allbytes = bytes(range(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", ""),
            ("", len(allbytes))
        )

    def test_decode_with_int2str_map(self):
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'a', 1: 'b', 2: 'c'}),
            ("abc", 3)
        )
        # A single byte may expand to several characters.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'Aa', 1: 'Bb', 2: 'Cc'}),
            ("AaBbCc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: '\U0010FFFF', 1: 'b', 2: 'c'}),
            ("\U0010FFFFbc", 3)
        )
        # Mapping to '' silently drops the byte even under "strict".
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'a', 1: 'b', 2: ''}),
            ("ab", 3)
        )

        # Missing key, None value and '\ufffe' all mean "undefined".
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b'}
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b', 2: None}
        )

        # Issue #14850
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b', 2: '\ufffe'}
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b'}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab\ufffd", 3)
        )

        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab\ufffd", 3)
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: 'a', 1: 'b'}),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab\\x02", 3)
        )

        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab\\x02", 3)
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b'}),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab", 3)
        )

        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab", 3)
        )

        allbytes = bytes(range(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", {}),
            ("", len(allbytes))
        )

        # Out-of-range ordinals in the mapping are rejected outright.
        self.assertRaisesRegex(TypeError,
            "character mapping must be in range\\(0x110000\\)",
            codecs.charmap_decode,
            b"\x00\x01\x02", "strict", {0: "A", 1: 'Bb', 2: -2}
        )
        self.assertRaisesRegex(TypeError,
            "character mapping must be in range\\(0x110000\\)",
            codecs.charmap_decode,
            b"\x00\x01\x02", "strict", {0: "A", 1: 'Bb', 2: 999999999}
        )

    def test_decode_with_int2int_map(self):
        a = ord('a')
        b = ord('b')
        c = ord('c')

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: a, 1: b, 2: c}),
            ("abc", 3)
        )

        # Issue #15379
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 0x10FFFF, 1: b, 2: c}),
            ("\U0010FFFFbc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: sys.maxunicode, 1: b, 2: c}),
            (chr(sys.maxunicode) + "bc", 3)
        )

        # An ordinal beyond sys.maxunicode is a TypeError, not a decode error.
        self.assertRaises(TypeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: sys.maxunicode + 1, 1: b, 2: c}
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: a, 1: b},
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: a, 1: b, 2: 0xFFFE},
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: a, 1: b}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab\ufffd", 3)
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: a, 1: b}),
            ("ab\\x02", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "backslashreplace",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab\\x02", 3)
        )

        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: a, 1: b}),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab", 3)
        )
class WithStmtTest(unittest.TestCase):
    """codecs wrapper objects are usable as context managers."""

    def test_encodedfile(self):
        raw = io.BytesIO(b"\xc3\xbc")
        with codecs.EncodedFile(raw, "latin-1", "utf-8") as wrapped:
            self.assertEqual(wrapped.read(), b"\xfc")
        # Leaving the with-block closes the underlying stream as well.
        self.assertTrue(raw.closed)

    def test_streamreaderwriter(self):
        raw = io.BytesIO(b"\xc3\xbc")
        info = codecs.lookup("utf-8")
        with codecs.StreamReaderWriter(raw, info.streamreader,
                                       info.streamwriter, 'strict') as srw:
            self.assertEqual(srw.read(), "\xfc")
class TypesTest(unittest.TestCase):
    """Argument-type behaviour of the low-level codec functions."""

    def test_decode_unicode(self):
        # Most low-level decoders operate on bytes and refuse str input.
        decoders = [
            codecs.utf_7_decode,
            codecs.utf_8_decode,
            codecs.utf_16_le_decode,
            codecs.utf_16_be_decode,
            codecs.utf_16_ex_decode,
            codecs.utf_32_decode,
            codecs.utf_32_le_decode,
            codecs.utf_32_be_decode,
            codecs.utf_32_ex_decode,
            codecs.latin_1_decode,
            codecs.ascii_decode,
            codecs.charmap_decode,
        ]
        if hasattr(codecs, "mbcs_decode"):
            decoders.append(codecs.mbcs_decode)
        for decode in decoders:
            with self.assertRaises(TypeError):
                decode("xxx")

    def test_unicode_escape(self):
        # Escape-decoding a unicode string is supported and gives the same
        # result as decoding the equivalent ASCII bytes string.
        for decode in (codecs.unicode_escape_decode,
                       codecs.raw_unicode_escape_decode):
            self.assertEqual(decode(r"\u1234"), ("\u1234", 6))
            self.assertEqual(decode(br"\u1234"), ("\u1234", 6))
            self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
            self.assertEqual(decode(r"\U00110000", "replace"), ("\ufffd", 10))
            self.assertEqual(
                decode(r"\U00110000", "backslashreplace"),
                (r"\x5c\x55\x30\x30\x31\x31\x30\x30\x30\x30", 10))
class UnicodeEscapeTest(ReadTest, unittest.TestCase):
    """The "unicode-escape" codec (ReadTest supplies check_partial and
    the generic stream-reading tests)."""
    encoding = "unicode-escape"
    # ReadTest's lone-surrogate tests do not apply to this codec.
    test_lone_surrogates = None

    def test_empty(self):
        self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))

    def test_raw_encode(self):
        # Printable ASCII (except the backslash) encodes to itself.
        encode = codecs.unicode_escape_encode
        for b in range(32, 127):
            if b != b'\\'[0]:
                self.assertEqual(encode(chr(b)), (bytes([b]), 1))

    def test_raw_decode(self):
        # Any non-backslash byte decodes to the same code point.
        decode = codecs.unicode_escape_decode
        for b in range(256):
            if b != b'\\'[0]:
                self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))

    def test_escape_encode(self):
        # coding_checker (defined earlier in this file) asserts that
        # encode(input) == (output, len(input)).
        encode = codecs.unicode_escape_encode
        check = coding_checker(self, encode)
        check('\t', br'\t')
        check('\n', br'\n')
        check('\r', br'\r')
        check('\\', br'\\')
        for b in range(32):
            if chr(b) not in '\t\n\r':
                check(chr(b), ('\\x%02x' % b).encode())
        for b in range(127, 256):
            check(chr(b), ('\\x%02x' % b).encode())
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')

    def test_escape_decode(self):
        decode = codecs.unicode_escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", "[]")
        check(br'[\"]', '["]')
        check(br"[\']", "[']")
        check(br"[\\]", r"[\]")
        check(br"[\a]", "[\x07]")
        check(br"[\b]", "[\x08]")
        check(br"[\t]", "[\x09]")
        check(br"[\n]", "[\x0a]")
        check(br"[\v]", "[\x0b]")
        check(br"[\f]", "[\x0c]")
        check(br"[\r]", "[\x0d]")
        check(br"[\7]", "[\x07]")
        check(br"[\78]", "[\x078]")
        check(br"[\41]", "[!]")
        check(br"[\418]", "[!8]")
        check(br"[\101]", "[A]")
        check(br"[\1010]", "[A0]")
        check(br"[\x41]", "[A]")
        check(br"[\x410]", "[A0]")
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
        # Unknown escapes pass through but warn (deprecated behaviour).
        for i in range(97, 123):
            b = bytes([i])
            if b not in b'abfnrtuvx':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b, "\\" + chr(i))
            if b.upper() not in b'UN':
                with self.assertWarns(DeprecationWarning):
                    check(b"\\" + b.upper(), "\\" + chr(i-32))
        with self.assertWarns(DeprecationWarning):
            check(br"\8", "\\8")
        with self.assertWarns(DeprecationWarning):
            check(br"\9", "\\9")
        with self.assertWarns(DeprecationWarning):
            check(b"\\\xfa", "\\\xfa")

    def test_decode_errors(self):
        # Truncated \x/\u/\U escapes and out-of-range code points.
        decode = codecs.unicode_escape_decode
        for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))

    def test_partial(self):
        # Expected output after each successive input byte of the
        # unicode-escape encoding of the test string (32 bytes in total).
        self.check_partial(
            "\x00\t\n\r\\\xff\uffff\U00010000",
            [
                '',
                '',
                '',
                '\x00',
                '\x00',
                '\x00\t',
                '\x00\t',
                '\x00\t\n',
                '\x00\t\n',
                '\x00\t\n\r',
                '\x00\t\n\r',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff\U00010000',
            ]
        )
class RawUnicodeEscapeTest(ReadTest, unittest.TestCase):
    """The "raw-unicode-escape" codec: only \\u and \\U are escapes;
    every other byte maps straight to a Latin-1 code point."""
    encoding = "raw-unicode-escape"
    # ReadTest's lone-surrogate tests do not apply to this codec.
    test_lone_surrogates = None

    def test_empty(self):
        self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))

    def test_raw_encode(self):
        encode = codecs.raw_unicode_escape_encode
        for b in range(256):
            self.assertEqual(encode(chr(b)), (bytes([b]), 1))

    def test_raw_decode(self):
        decode = codecs.raw_unicode_escape_decode
        for b in range(256):
            self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))

    def test_escape_encode(self):
        # coding_checker (defined earlier in this file) asserts that
        # encode(input) == (output, len(input)).
        encode = codecs.raw_unicode_escape_encode
        check = coding_checker(self, encode)
        for b in range(256):
            if b not in b'uU':
                check('\\' + chr(b), b'\\' + bytes([b]))
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')

    def test_escape_decode(self):
        decode = codecs.raw_unicode_escape_decode
        check = coding_checker(self, decode)
        for b in range(256):
            if b not in b'uU':
                check(b'\\' + bytes([b]), '\\' + chr(b))
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")

    def test_decode_errors(self):
        # Truncated \u/\U escapes and out-of-range code points.
        decode = codecs.raw_unicode_escape_decode
        for c, d in (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))

    def test_partial(self):
        # Expected output after each successive input byte of the
        # raw-unicode-escape encoding of the test string (22 bytes total).
        self.check_partial(
            "\x00\t\n\r\\\xff\uffff\U00010000",
            [
                '\x00',
                '\x00\t',
                '\x00\t\n',
                '\x00\t\n\r',
                '\x00\t\n\r',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff',
                '\x00\t\n\r\\\xff\uffff\U00010000',
            ]
        )
class EscapeEncodeTest(unittest.TestCase):

    def test_escape_encode(self):
        """codecs.escape_encode() produces repr-style escaped bytes."""
        cases = [
            (b'', (b'', 0)),
            (b'foobar', (b'foobar', 6)),
            (b'spam\0eggs', (b'spam\\x00eggs', 9)),
            (b'a\'b', (b"a\\'b", 3)),
            (b'b\\c', (b'b\\\\c', 3)),
            (b'c\nd', (b'c\\nd', 3)),
            (b'd\re', (b'd\\re', 3)),
            (b'f\x7fg', (b'f\\x7fg', 3)),
        ]
        for data, expected in cases:
            with self.subTest(data=data):
                self.assertEqual(codecs.escape_encode(data), expected)
        # Only exact bytes objects are accepted — not str, not bytearray.
        with self.assertRaises(TypeError):
            codecs.escape_encode('spam')
        with self.assertRaises(TypeError):
            codecs.escape_encode(bytearray(b'spam'))
class SurrogateEscapeTest(unittest.TestCase):
    """The surrogateescape error handler round-trips undecodable bytes."""

    def _roundtrip(self, raw, encoding, text):
        # Decoding maps each bad byte 0xNN to the surrogate U+DCNN;
        # encoding maps it back to the original byte.
        self.assertEqual(raw.decode(encoding, "surrogateescape"), text)
        self.assertEqual(text.encode(encoding, "surrogateescape"), raw)

    def test_utf8(self):
        # A stray byte that is invalid UTF-8.
        self._roundtrip(b"foo\x80bar", "utf-8", "foo\udc80bar")
        # A UTF-8-encoded surrogate is itself invalid and escapes bytewise.
        self._roundtrip(b"\xed\xb0\x80", "utf-8", "\udced\udcb0\udc80")

    def test_ascii(self):
        # Any non-ASCII byte is escaped.
        self._roundtrip(b"foo\x80bar", "ascii", "foo\udc80bar")

    def test_charmap(self):
        # \xa5 is unmapped in iso-8859-3.
        self._roundtrip(b"foo\xa5bar", "iso-8859-3", "foo\udca5bar")

    def test_latin1(self):
        # Issue6373: lone surrogates encode back to raw latin-1 bytes.
        self.assertEqual(
            "\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1",
                                                    "surrogateescape"),
            b"\xe4\xeb\xef\xf6\xfc")
class BomTest(unittest.TestCase):
    def test_seek0(self):
        """Check when the BOM is (re)written around seek(0) on
        codecs.open() files for all BOM-carrying UTF encodings."""
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)

            # Check that the BOM is written after a seek(0)
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)

            # (StreamWriter) Check that the BOM is written after a seek(0)
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)

            # Check that the BOM is not written after a seek() at a position
            # different than the start
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)

            # (StreamWriter) Check that the BOM is not written after a seek()
            # at a position different than the start
            with codecs.open(os_helper.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
# Binary-transform ("bytes-to-bytes") codecs exercised by the
# transform-codec tests below.
bytes_transform_encodings = [
    "base64_codec",
    "uu_codec",
    "quopri_codec",
    "hex_codec",
]

# Alternative spellings accepted by codecs.lookup() for each transform.
transform_aliases = {
    "base64_codec": ["base64", "base_64"],
    "uu_codec": ["uu"],
    "quopri_codec": ["quopri", "quoted_printable", "quotedprintable"],
    "hex_codec": ["hex"],
    "rot_13": ["rot13"],
}

# zlib and bz2 support is optional at interpreter build time; only test
# the corresponding codecs when the modules are importable.
try:
    import zlib
except ImportError:
    zlib = None
else:
    bytes_transform_encodings.append("zlib_codec")
    transform_aliases["zlib_codec"] = ["zip", "zlib"]
try:
    import bz2
except ImportError:
    pass
else:
    bytes_transform_encodings.append("bz2_codec")
    transform_aliases["bz2_codec"] = ["bz2"]
class TransformCodecTest(unittest.TestCase):
    """Round-trip, streaming and error-reporting checks for the
    bytes-to-bytes transform codecs listed in *bytes_transform_encodings*."""

    def test_basics(self):
        # Encode then decode every byte value and check the round trip.
        binput = bytes(range(256))
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                # generic codecs interface
                (o, size) = codecs.getencoder(encoding)(binput)
                self.assertEqual(size, len(binput))
                (i, size) = codecs.getdecoder(encoding)(o)
                self.assertEqual(size, len(o))
                self.assertEqual(i, binput)

    def test_read(self):
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.read()
                self.assertEqual(sout, b"\x80")

    def test_readline(self):
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.readline()
                self.assertEqual(sout, b"\x80")

    def test_buffer_api_usage(self):
        # We check all the transform codecs accept memoryview input
        # for encoding and decoding
        # and also that they roundtrip correctly
        original = b"12345\x80"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                data = original
                view = memoryview(data)
                data = codecs.encode(data, encoding)
                view_encoded = codecs.encode(view, encoding)
                self.assertEqual(view_encoded, data)
                view = memoryview(data)
                data = codecs.decode(data, encoding)
                self.assertEqual(data, original)
                view_decoded = codecs.decode(view, encoding)
                self.assertEqual(view_decoded, data)

    def test_text_to_binary_denylists_binary_transforms(self):
        # Check binary -> binary codecs give a good error for str input
        bad_input = "bad input type"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.encode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.encode(encoding)
                self.assertIsNone(failure.exception.__cause__)

    def test_text_to_binary_denylists_text_transforms(self):
        # Check str.encode gives a good error message for str -> str codecs
        msg = (r"^'rot_13' is not a text encoding; "
               r"use codecs.encode\(\) to handle arbitrary codecs")
        with self.assertRaisesRegex(LookupError, msg):
            "just an example message".encode("rot_13")

    def test_binary_to_text_denylists_binary_transforms(self):
        # Check bytes.decode and bytearray.decode give a good error
        # message for binary -> binary codecs
        data = b"encode first to ensure we meet any format restrictions"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                encoded_data = codecs.encode(data, encoding)
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    encoded_data.decode(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    bytearray(encoded_data).decode(encoding)

    def test_binary_to_text_denylists_text_transforms(self):
        # Check str -> str codec gives a good error for binary input
        for bad_input in (b"immutable", bytearray(b"mutable")):
            with self.subTest(bad_input=bad_input):
                msg = (r"^'rot_13' is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.decode("rot_13")
                self.assertIsNone(failure.exception.__cause__)

    @unittest.skipUnless(zlib, "Requires zlib support")
    def test_custom_zlib_error_is_wrapped(self):
        # Check zlib codec gives a good error for malformed input
        msg = "^decoding with 'zlib_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "zlib_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))

    def test_custom_hex_error_is_wrapped(self):
        # Check hex codec gives a good error for malformed input
        msg = "^decoding with 'hex_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "hex_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))

    # Unfortunately, the bz2 module throws OSError, which the codec
    # machinery currently can't wrap :(

    # Ensure codec aliases from http://bugs.python.org/issue7475 work
    def test_aliases(self):
        for codec_name, aliases in transform_aliases.items():
            expected_name = codecs.lookup(codec_name).name
            for alias in aliases:
                with self.subTest(alias=alias):
                    info = codecs.lookup(alias)
                    self.assertEqual(info.name, expected_name)

    def test_quopri_stateless(self):
        # Should encode with quotetabs=True
        encoded = codecs.encode(b"space tab\teol \n", "quopri-codec")
        self.assertEqual(encoded, b"space=20tab=09eol=20\n")
        # But should still support unescaped tabs and spaces
        unescaped = b"space tab eol\n"
        self.assertEqual(codecs.decode(unescaped, "quopri-codec"), unescaped)

    def test_uu_invalid(self):
        # Missing "begin" line
        self.assertRaises(ValueError, codecs.decode, b"", "uu-codec")
# The codec system tries to wrap exceptions in order to ensure the error
# mentions the operation being performed and the codec involved. We
# currently *only* want this to happen for relatively stateless
# exceptions, where the only significant information they contain is their
# type and a single str argument.

# Use a local codec registry to avoid appearing to leak objects when
# registering multiple search functions
_TEST_CODECS = {}


def _get_test_codec(codec_name):
    # Search function registered by ExceptionChainingTest; entries are
    # installed/removed per-test via _TEST_CODECS.
    return _TEST_CODECS.get(codec_name)
class ExceptionChainingTest(unittest.TestCase):
    """Checks which exceptions raised inside a codec get wrapped (with
    ``__cause__`` chaining) by the codec machinery, and which pass through
    untouched."""

    def setUp(self):
        self.codec_name = 'exception_chaining_test'
        codecs.register(_get_test_codec)
        self.addCleanup(codecs.unregister, _get_test_codec)

        # We store the object to raise on the instance because of a bad
        # interaction between the codec caching (which means we can't
        # recreate the codec entry) and regrtest refleak hunting (which
        # runs the same test instance multiple times). This means we
        # need to ensure the codecs call back in to the instance to find
        # out which exception to raise rather than binding them in a
        # closure to an object that may change on the next run
        self.obj_to_raise = RuntimeError

    def tearDown(self):
        _TEST_CODECS.pop(self.codec_name, None)
        # Issue #22166: Also pop from caches to avoid appearance of ref leaks
        encodings._cache.pop(self.codec_name, None)

    def set_codec(self, encode, decode):
        # Install the given callables as the test codec's codec entry.
        codec_info = codecs.CodecInfo(encode, decode,
                                      name=self.codec_name)
        _TEST_CODECS[self.codec_name] = codec_info

    @contextlib.contextmanager
    def assertWrapped(self, operation, exc_type, msg):
        # Assert that the raised exception was wrapped: message mentions the
        # operation and codec, and the original is chained via __cause__.
        full_msg = r"{} with {!r} codec failed \({}: {}\)".format(
            operation, self.codec_name, exc_type.__name__, msg)
        with self.assertRaisesRegex(exc_type, full_msg) as caught:
            yield caught
        self.assertIsInstance(caught.exception.__cause__, exc_type)
        self.assertIsNotNone(caught.exception.__cause__.__traceback__)

    def raise_obj(self, *args, **kwds):
        # Helper to dynamically change the object raised by a test codec
        raise self.obj_to_raise

    def check_wrapped(self, obj_to_raise, msg, exc_type=RuntimeError):
        self.obj_to_raise = obj_to_raise
        self.set_codec(self.raise_obj, self.raise_obj)
        with self.assertWrapped("encoding", exc_type, msg):
            "str_input".encode(self.codec_name)
        with self.assertWrapped("encoding", exc_type, msg):
            codecs.encode("str_input", self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            codecs.decode(b"bytes input", self.codec_name)

    def test_raise_by_type(self):
        self.check_wrapped(RuntimeError, "")

    def test_raise_by_value(self):
        msg = "This should be wrapped"
        self.check_wrapped(RuntimeError(msg), msg)

    def test_raise_grandchild_subclass_exact_size(self):
        msg = "This should be wrapped"

        class MyRuntimeError(RuntimeError):
            __slots__ = ()
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)

    def test_raise_subclass_with_weakref_support(self):
        msg = "This should be wrapped"

        class MyRuntimeError(RuntimeError):
            pass
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)

    def check_not_wrapped(self, obj_to_raise, msg):
        def raise_obj(*args, **kwds):
            raise obj_to_raise
        self.set_codec(raise_obj, raise_obj)
        with self.assertRaisesRegex(RuntimeError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.decode(b"bytes input", self.codec_name)

    def test_init_override_is_not_wrapped(self):
        class CustomInit(RuntimeError):
            def __init__(self):
                pass
        self.check_not_wrapped(CustomInit, "")

    def test_new_override_is_not_wrapped(self):
        class CustomNew(RuntimeError):
            def __new__(cls):
                return super().__new__(cls)
        self.check_not_wrapped(CustomNew, "")

    def test_instance_attribute_is_not_wrapped(self):
        msg = "This should NOT be wrapped"
        exc = RuntimeError(msg)
        exc.attr = 1
        self.check_not_wrapped(exc, "^{}$".format(msg))

    def test_non_str_arg_is_not_wrapped(self):
        self.check_not_wrapped(RuntimeError(1), "1")

    def test_multiple_args_is_not_wrapped(self):
        msg_re = r"^\('a', 'b', 'c'\)$"
        self.check_not_wrapped(RuntimeError('a', 'b', 'c'), msg_re)

    # http://bugs.python.org/issue19609
    def test_codec_lookup_failure_not_wrapped(self):
        msg = "^unknown encoding: {}$".format(self.codec_name)
        # The initial codec lookup should not be wrapped
        with self.assertRaisesRegex(LookupError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.decode(b"bytes input", self.codec_name)

    def test_unflagged_non_text_codec_handling(self):
        # The stdlib non-text codecs are now marked so they're
        # pre-emptively skipped by the text model related methods
        # However, third party codecs won't be flagged, so we still make
        # sure the case where an inappropriate output type is produced is
        # handled appropriately
        def encode_to_str(*args, **kwds):
            return "not bytes!", 0

        def decode_to_bytes(*args, **kwds):
            return b"not str!", 0
        self.set_codec(encode_to_str, decode_to_bytes)
        # No input or output type checks on the codecs module functions
        encoded = codecs.encode(None, self.codec_name)
        self.assertEqual(encoded, "not bytes!")
        decoded = codecs.decode(None, self.codec_name)
        self.assertEqual(decoded, b"not str!")
        # Text model methods should complain
        fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
               r"use codecs.encode\(\) to encode to arbitrary types$")
        msg = fmt.format(self.codec_name)
        with self.assertRaisesRegex(TypeError, msg):
            "str_input".encode(self.codec_name)
        fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
               r"use codecs.decode\(\) to decode to arbitrary types$")
        msg = fmt.format(self.codec_name)
        with self.assertRaisesRegex(TypeError, msg):
            b"bytes input".decode(self.codec_name)
@unittest.skipUnless(sys.platform == 'win32',
                     'code pages are specific to Windows')
class CodePageTest(unittest.TestCase):
    """Tests for codecs.code_page_encode()/code_page_decode() (Windows-only)."""

    # Windows code page number for UTF-8
    CP_UTF8 = 65001

    def test_invalid_code_page(self):
        self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
        self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
        self.assertRaises(OSError, codecs.code_page_encode, 123, 'a')
        self.assertRaises(OSError, codecs.code_page_decode, 123, b'a')

    def test_code_page_name(self):
        # The failing code page should be named in the exception message.
        self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
                               codecs.code_page_encode, 932, '\xff')
        self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
                               codecs.code_page_decode, 932, b'\x81\x00', 'strict', True)
        self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
                               codecs.code_page_decode, self.CP_UTF8, b'\xff', 'strict', True)

    def check_decode(self, cp, tests):
        # Each test is (raw bytes, error handler, expected str or None);
        # None means a UnicodeDecodeError is expected.
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = codecs.code_page_decode(cp, raw, errors, True)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from "cp%s" with '
                              'errors=%r: %s' % (raw, cp, errors, err))
                self.assertEqual(decoded[0], expected,
                                 '%a.decode("cp%s", %r)=%a != %a'
                                 % (raw, cp, errors, decoded[0], expected))
                # assert 0 <= decoded[1] <= len(raw)
                self.assertGreaterEqual(decoded[1], 0)
                self.assertLessEqual(decoded[1], len(raw))
            else:
                self.assertRaises(UnicodeDecodeError,
                                  codecs.code_page_decode, cp, raw, errors, True)

    def check_encode(self, cp, tests):
        # Each test is (text, error handler, expected bytes or None);
        # None means a UnicodeEncodeError is expected.
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = codecs.code_page_encode(cp, text, errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to "cp%s" with '
                              'errors=%r: %s' % (text, cp, errors, err))
                self.assertEqual(encoded[0], expected,
                                 '%a.encode("cp%s", %r)=%a != %a'
                                 % (text, cp, errors, encoded[0], expected))
                self.assertEqual(encoded[1], len(text))
            else:
                self.assertRaises(UnicodeEncodeError,
                                  codecs.code_page_encode, cp, text, errors)

    def test_cp932(self):
        self.check_encode(932, (
            ('abc', 'strict', b'abc'),
            ('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
            # test error handlers
            ('\xff', 'strict', None),
            ('[\xff]', 'ignore', b'[]'),
            ('[\xff]', 'replace', b'[y]'),
            ('[\u20ac]', 'replace', b'[?]'),
            ('[\xff]', 'backslashreplace', b'[\\xff]'),
            ('[\xff]', 'namereplace',
             b'[\\N{LATIN SMALL LETTER Y WITH DIAERESIS}]'),
            ('[\xff]', 'xmlcharrefreplace', b'[&#255;]'),
            ('\udcff', 'strict', None),
            ('[\udcff]', 'surrogateescape', b'[\xff]'),
            ('[\udcff]', 'surrogatepass', None),
        ))
        self.check_decode(932, (
            (b'abc', 'strict', 'abc'),
            (b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
            # invalid bytes
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'backslashreplace', '[\\xff]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
            (b'[\xff]', 'surrogatepass', None),
            (b'\x81\x00abc', 'strict', None),
            (b'\x81\x00abc', 'ignore', '\x00abc'),
            (b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
            (b'\x81\x00abc', 'backslashreplace', '\\x81\x00abc'),
        ))

    def test_cp1252(self):
        self.check_encode(1252, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'\xe9\x80'),
            ('\xff', 'strict', b'\xff'),
            # test error handlers
            ('\u0141', 'strict', None),
            ('\u0141', 'ignore', b''),
            ('\u0141', 'replace', b'L'),
            ('\udc98', 'surrogateescape', b'\x98'),
            ('\udc98', 'surrogatepass', None),
        ))
        self.check_decode(1252, (
            (b'abc', 'strict', 'abc'),
            (b'\xe9\x80', 'strict', '\xe9\u20ac'),
            (b'\xff', 'strict', '\xff'),
        ))

    def test_cp_utf7(self):
        cp = 65000
        self.check_encode(cp, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'+AOkgrA-'),
            ('\U0010ffff', 'strict', b'+2//f/w-'),
            ('\udc80', 'strict', b'+3IA-'),
            ('\ufffd', 'strict', b'+//0-'),
        ))
        self.check_decode(cp, (
            (b'abc', 'strict', 'abc'),
            (b'+AOkgrA-', 'strict', '\xe9\u20ac'),
            (b'+2//f/w-', 'strict', '\U0010ffff'),
            (b'+3IA-', 'strict', '\udc80'),
            (b'+//0-', 'strict', '\ufffd'),
            # invalid bytes
            (b'[+/]', 'strict', '[]'),
            (b'[\xff]', 'strict', '[\xff]'),
        ))

    def test_multibyte_encoding(self):
        self.check_decode(932, (
            (b'\x84\xe9\x80', 'ignore', '\u9a3e'),
            (b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
        ))
        self.check_decode(self.CP_UTF8, (
            (b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
            (b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
        ))
        self.check_encode(self.CP_UTF8, (
            ('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
            ('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
        ))

    def test_code_page_decode_flags(self):
        # Issue #36312: For some code pages (e.g. UTF-7) flags for
        # MultiByteToWideChar() must be set to 0.
        if support.verbose:
            sys.stdout.write('\n')
        for cp in (50220, 50221, 50222, 50225, 50227, 50229,
                   *range(57002, 57011+1), 65000):
            # On small versions of Windows like Windows IoT
            # not all codepages are present.
            # A missing codepage causes an OSError exception
            # so check for the codepage before decoding
            if is_code_page_present(cp):
                self.assertEqual(codecs.code_page_decode(cp, b'abc'), ('abc', 3), f'cp{cp}')
            else:
                if support.verbose:
                    print(f" skipping cp={cp}")
        self.assertEqual(codecs.code_page_decode(42, b'abc'),
                         ('\uf061\uf062\uf063', 3))

    def test_incremental(self):
        # Incomplete trailing byte sequences are retained until final=True.
        decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
        self.assertEqual(decoded, ('', 0))
        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e', 2))
        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9\x80', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))
        decoded = codecs.code_page_decode(932,
                                          b'abc', 'strict',
                                          False)
        self.assertEqual(decoded, ('abc', 3))

    def test_mbcs_alias(self):
        # Check that looking up our 'default' codepage will return
        # mbcs when we don't have a more specific one available
        with mock.patch('_winapi.GetACP', return_value=123):
            codec = codecs.lookup('cp123')
            self.assertEqual(codec.name, 'mbcs')

    @support.bigmemtest(size=2**31, memuse=7, dry_run=False)
    def test_large_input(self, size):
        # Test input longer than INT_MAX.
        # Input should contain undecodable bytes before and after
        # the INT_MAX limit.
        encoded = (b'01234567' * ((size//8)-1) +
                   b'\x85\x86\xea\xeb\xec\xef\xfc\xfd\xfe\xff')
        self.assertEqual(len(encoded), size+2)
        decoded = codecs.code_page_decode(932, encoded, 'surrogateescape', True)
        self.assertEqual(decoded[1], len(encoded))
        del encoded
        self.assertEqual(len(decoded[0]), decoded[1])
        self.assertEqual(decoded[0][:10], '0123456701')
        self.assertEqual(decoded[0][-20:],
                         '6701234567'
                         '\udc85\udc86\udcea\udceb\udcec'
                         '\udcef\udcfc\udcfd\udcfe\udcff')

    @support.bigmemtest(size=2**31, memuse=6, dry_run=False)
    def test_large_utf8_input(self, size):
        # Test input longer than INT_MAX.
        # Input should contain a decodable multi-byte character
        # surrounding INT_MAX
        encoded = (b'0123456\xed\x84\x80' * (size//8))
        self.assertEqual(len(encoded), size // 8 * 10)
        decoded = codecs.code_page_decode(65001, encoded, 'ignore', True)
        self.assertEqual(decoded[1], len(encoded))
        del encoded
        self.assertEqual(len(decoded[0]), size)
        self.assertEqual(decoded[0][:10], '0123456\ud10001')
        self.assertEqual(decoded[0][-11:], '56\ud1000123456\ud100')
class ASCIITest(unittest.TestCase):
    """Encode/decode and error-handler tests for the 'ascii' codec."""

    def test_encode(self):
        self.assertEqual('abc123'.encode('ascii'), b'abc123')

    def test_encode_error(self):
        for data, error_handler, expected in (
            ('[\x80\xff\u20ac]', 'ignore', b'[]'),
            ('[\x80\xff\u20ac]', 'replace', b'[???]'),
            ('[\x80\xff\u20ac]', 'xmlcharrefreplace', b'[&#128;&#255;&#8364;]'),
            ('[\x80\xff\u20ac\U000abcde]', 'backslashreplace',
             b'[\\x80\\xff\\u20ac\\U000abcde]'),
            ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.encode('ascii', error_handler),
                                 expected)

    def test_encode_surrogateescape_error(self):
        with self.assertRaises(UnicodeEncodeError):
            # the first character can be decoded, but not the second
            '\udc80\xff'.encode('ascii', 'surrogateescape')

    def test_decode(self):
        self.assertEqual(b'abc'.decode('ascii'), 'abc')

    def test_decode_error(self):
        for data, error_handler, expected in (
            (b'[\x80\xff]', 'ignore', '[]'),
            (b'[\x80\xff]', 'replace', '[\ufffd\ufffd]'),
            (b'[\x80\xff]', 'surrogateescape', '[\udc80\udcff]'),
            (b'[\x80\xff]', 'backslashreplace', '[\\x80\\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.decode('ascii', error_handler),
                                 expected)
class Latin1Test(unittest.TestCase):
    """Encode/decode and error-handler tests for the 'latin1' codec."""

    def test_encode(self):
        for data, expected in (
            ('abc', b'abc'),
            ('\x80\xe9\xff', b'\x80\xe9\xff'),
        ):
            with self.subTest(data=data, expected=expected):
                self.assertEqual(data.encode('latin1'), expected)

    def test_encode_errors(self):
        for data, error_handler, expected in (
            ('[\u20ac\udc80]', 'ignore', b'[]'),
            ('[\u20ac\udc80]', 'replace', b'[??]'),
            ('[\u20ac\U000abcde]', 'backslashreplace',
             b'[\\u20ac\\U000abcde]'),
            ('[\u20ac\udc80]', 'xmlcharrefreplace', b'[&#8364;&#56448;]'),
            ('[\udc80\udcff]', 'surrogateescape', b'[\x80\xff]'),
        ):
            with self.subTest(data=data, error_handler=error_handler,
                              expected=expected):
                self.assertEqual(data.encode('latin1', error_handler),
                                 expected)

    def test_encode_surrogateescape_error(self):
        with self.assertRaises(UnicodeEncodeError):
            # the first character can be decoded, but not the second
            '\udc80\u20ac'.encode('latin1', 'surrogateescape')

    def test_decode(self):
        for data, expected in (
            (b'abc', 'abc'),
            (b'[\x80\xff]', '[\x80\xff]'),
        ):
            with self.subTest(data=data, expected=expected):
                self.assertEqual(data.decode('latin1'), expected)
class StreamRecoderTest(unittest.TestCase):
    """Tests for codecs.StreamRecoder and codecs.EncodedFile wrappers."""

    def test_writelines(self):
        bio = io.BytesIO()
        codec = codecs.lookup('ascii')
        sr = codecs.StreamRecoder(bio, codec.encode, codec.decode,
                                  encodings.ascii.StreamReader, encodings.ascii.StreamWriter)
        sr.writelines([b'a', b'b'])
        self.assertEqual(bio.getvalue(), b'ab')

    def test_write(self):
        bio = io.BytesIO()
        codec = codecs.lookup('latin1')
        # Recode from Latin-1 to utf-8.
        sr = codecs.StreamRecoder(bio, codec.encode, codec.decode,
                                  encodings.utf_8.StreamReader, encodings.utf_8.StreamWriter)
        text = 'àñé'
        sr.write(text.encode('latin1'))
        self.assertEqual(bio.getvalue(), text.encode('utf-8'))

    def test_seeking_read(self):
        bio = io.BytesIO('line1\nline2\nline3\n'.encode('utf-16-le'))
        sr = codecs.EncodedFile(bio, 'utf-8', 'utf-16-le')

        self.assertEqual(sr.readline(), b'line1\n')
        sr.seek(0)
        self.assertEqual(sr.readline(), b'line1\n')
        self.assertEqual(sr.readline(), b'line2\n')
        self.assertEqual(sr.readline(), b'line3\n')
        self.assertEqual(sr.readline(), b'')

    def test_seeking_write(self):
        bio = io.BytesIO('123456789\n'.encode('utf-16-le'))
        sr = codecs.EncodedFile(bio, 'utf-8', 'utf-16-le')

        # Test that seek() only resets its internal buffer when offset
        # and whence are zero.
        sr.seek(2)
        sr.write(b'\nabc\n')
        self.assertEqual(sr.readline(), b'789\n')

        sr.seek(0)
        self.assertEqual(sr.readline(), b'1\n')
        self.assertEqual(sr.readline(), b'abc\n')
        self.assertEqual(sr.readline(), b'789\n')
@unittest.skipIf(_testcapi is None, 'need _testcapi module')
class LocaleCodecTest(unittest.TestCase):
    """Exercises _testcapi.EncodeLocaleEx()/DecodeLocaleEx() against the
    filesystem encoding, comparing against str.encode()/bytes.decode()."""

    ENCODING = sys.getfilesystemencoding()
    STRINGS = ("ascii", "ulatin1:\xa7\xe9",
               "u255:\xff",
               "UCS:\xe9\u20ac\U0010ffff",
               "surrogates:\uDC80\uDCFF")
    BYTES_STRINGS = (b"blatin1:\xa7\xe9", b"b255:\xff")
    SURROGATES = "\uDC80\uDCFF"

    def encode(self, text, errors="strict"):
        return _testcapi.EncodeLocaleEx(text, 0, errors)

    def check_encode_strings(self, errors):
        # Encoding through the C API must match str.encode(); where
        # str.encode() fails, the C API is expected to raise RuntimeError.
        for text in self.STRINGS:
            with self.subTest(text=text):
                try:
                    expected = text.encode(self.ENCODING, errors)
                except UnicodeEncodeError:
                    with self.assertRaises(RuntimeError) as cm:
                        self.encode(text, errors)
                    errmsg = str(cm.exception)
                    self.assertRegex(errmsg, r"encode error: pos=[0-9]+, reason=")
                else:
                    encoded = self.encode(text, errors)
                    self.assertEqual(encoded, expected)

    def test_encode_strict(self):
        self.check_encode_strings("strict")

    def test_encode_surrogateescape(self):
        self.check_encode_strings("surrogateescape")

    def test_encode_surrogatepass(self):
        try:
            self.encode('', 'surrogatepass')
        except ValueError as exc:
            if str(exc) == 'unsupported error handler':
                self.skipTest(f"{self.ENCODING!r} encoder doesn't support "
                              f"surrogatepass error handler")
            else:
                raise

        self.check_encode_strings("surrogatepass")

    def test_encode_unsupported_error_handler(self):
        with self.assertRaises(ValueError) as cm:
            self.encode('', 'backslashreplace')
        self.assertEqual(str(cm.exception), 'unsupported error handler')

    def decode(self, encoded, errors="strict"):
        return _testcapi.DecodeLocaleEx(encoded, 0, errors)

    def check_decode_strings(self, errors):
        is_utf8 = (self.ENCODING == "utf-8")
        if is_utf8:
            encode_errors = 'surrogateescape'
        else:
            encode_errors = 'strict'

        # Build the set of byte strings to decode from BYTES_STRINGS plus
        # whatever STRINGS can be encoded with the chosen error handler.
        strings = list(self.BYTES_STRINGS)
        for text in self.STRINGS:
            try:
                encoded = text.encode(self.ENCODING, encode_errors)
                if encoded not in strings:
                    strings.append(encoded)
            except UnicodeEncodeError:
                encoded = None

            if is_utf8:
                encoded2 = text.encode(self.ENCODING, 'surrogatepass')
                if encoded2 != encoded:
                    strings.append(encoded2)

        for encoded in strings:
            with self.subTest(encoded=encoded):
                try:
                    expected = encoded.decode(self.ENCODING, errors)
                except UnicodeDecodeError:
                    with self.assertRaises(RuntimeError) as cm:
                        self.decode(encoded, errors)
                    errmsg = str(cm.exception)
                    self.assertTrue(errmsg.startswith("decode error: "), errmsg)
                else:
                    decoded = self.decode(encoded, errors)
                    self.assertEqual(decoded, expected)

    def test_decode_strict(self):
        self.check_decode_strings("strict")

    def test_decode_surrogateescape(self):
        self.check_decode_strings("surrogateescape")

    def test_decode_surrogatepass(self):
        try:
            self.decode(b'', 'surrogatepass')
        except ValueError as exc:
            if str(exc) == 'unsupported error handler':
                self.skipTest(f"{self.ENCODING!r} decoder doesn't support "
                              f"surrogatepass error handler")
            else:
                raise

        self.check_decode_strings("surrogatepass")

    def test_decode_unsupported_error_handler(self):
        with self.assertRaises(ValueError) as cm:
            self.decode(b'', 'backslashreplace')
        self.assertEqual(str(cm.exception), 'unsupported error handler')
class Rot13Test(unittest.TestCase):
    """Test the rot-13 codec via the codecs module's stateless and
    incremental interfaces."""

    def test_encode(self):
        ciphertext = codecs.encode("Caesar liked ciphers", 'rot-13')
        self.assertEqual(ciphertext, 'Pnrfne yvxrq pvcuref')

    def test_decode(self):
        plaintext = codecs.decode('Rg gh, Oehgr?', 'rot-13')
        self.assertEqual(plaintext, 'Et tu, Brute?')

    def test_incremental_encode(self):
        encoder = codecs.getincrementalencoder('rot-13')()
        ciphertext = encoder.encode('ABBA nag Cheryl Baker')
        self.assertEqual(ciphertext, 'NOON ant Purely Onxre')

    def test_incremental_decode(self):
        decoder = codecs.getincrementaldecoder('rot-13')()
        plaintext = decoder.decode('terra Ares envy tha')
        self.assertEqual(plaintext, 'green Nerf rail gun')
class Rot13UtilTest(unittest.TestCase):
    """Test the file-to-file rot13() helper in encodings.rot_13."""

    def test_rot13_func(self):
        infile = io.StringIO('Gb or, be abg gb or, gung vf gur dhrfgvba')
        outfile = io.StringIO()
        encodings.rot_13.rot13(infile, outfile)
        outfile.seek(0)
        plain_text = outfile.read()
        self.assertEqual(
            plain_text,
            'To be, or not to be, that is the question')
class CodecNameNormalizationTest(unittest.TestCase):
    """Checks how codec names are normalized before lookup: hyphens and
    spaces fold to underscores, but dots do not."""

    def test_codecs_lookup(self):
        FOUND = (1, 2, 3, 4)
        NOT_FOUND = (None, None, None, None)

        def search_function(encoding):
            if encoding == "aaa_8":
                return FOUND
            else:
                return NOT_FOUND

        codecs.register(search_function)
        self.addCleanup(codecs.unregister, search_function)
        self.assertEqual(FOUND, codecs.lookup('aaa_8'))
        self.assertEqual(FOUND, codecs.lookup('AAA-8'))
        self.assertEqual(FOUND, codecs.lookup('AAA---8'))
        self.assertEqual(FOUND, codecs.lookup('AAA   8'))
        self.assertEqual(FOUND, codecs.lookup('aaa\xe9\u20ac-8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('AAA.8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('AAA...8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('BBB-8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('BBB.8'))
        self.assertEqual(NOT_FOUND, codecs.lookup('a\xe9\u20ac-8'))

    def test_encodings_normalize_encoding(self):
        # encodings.normalize_encoding() ignores non-ASCII characters.
        normalize = encodings.normalize_encoding
        self.assertEqual(normalize('utf_8'), 'utf_8')
        self.assertEqual(normalize('utf\xE9\u20AC\U0010ffff-8'), 'utf_8')
        self.assertEqual(normalize('utf 8'), 'utf_8')
        # encodings.normalize_encoding() doesn't convert
        # characters to lower case.
        self.assertEqual(normalize('UTF 8'), 'UTF_8')
        self.assertEqual(normalize('utf.8'), 'utf.8')
        self.assertEqual(normalize('utf...8'), 'utf...8')
# Allow running this test module directly (it is normally driven by the
# test-runner machinery).
if __name__ == "__main__":
    unittest.main()
| true | true |
f73110ecdff79d7c029c0dd0d895ef71ea68326b | 12,233 | py | Python | loopy/transform/instruction.py | benSepanski/loopy | 5db582d579eb65ce58b93e2c53feb1d48404cf2d | [
"MIT"
] | null | null | null | loopy/transform/instruction.py | benSepanski/loopy | 5db582d579eb65ce58b93e2c53feb1d48404cf2d | [
"MIT"
] | null | null | null | loopy/transform/instruction.py | benSepanski/loopy | 5db582d579eb65ce58b93e2c53feb1d48404cf2d | [
"MIT"
] | null | null | null | from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2012 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six # noqa
from loopy.diagnostic import LoopyError
from loopy.kernel import LoopKernel
from loopy.kernel.function_interface import (ScalarCallable, CallableKernel)
from loopy.program import Program, iterate_over_kernels_if_given_program
# {{{ find_instructions
def find_instructions_in_single_kernel(kernel, insn_match):
    """Return the list of instructions of *kernel* that are matched by
    *insn_match* (any match expression understood by
    :func:`loopy.match.parse_match`)."""
    assert isinstance(kernel, LoopKernel)
    from loopy.match import parse_match
    predicate = parse_match(insn_match)

    result = []
    for insn in kernel.instructions:
        if predicate(kernel, insn):
            result.append(insn)

    return result
def find_instructions(program, insn_match):
    """Return all instructions matched by *insn_match* across every kernel
    callable contained in *program*."""
    assert isinstance(program, Program)

    matched = []
    for clbl in program.callables_table.values():
        if isinstance(clbl, CallableKernel):
            matched.extend(
                    find_instructions_in_single_kernel(
                        clbl.subkernel, insn_match))
        elif isinstance(clbl, ScalarCallable):
            # scalar callables carry no instructions
            continue
        else:
            raise NotImplementedError("Unknown callable type %s." % (
                type(clbl)))

    return matched
# }}}
# {{{ map_instructions
def map_instructions(kernel, insn_match, f):
    """Return a copy of *kernel* in which *f* has been applied to every
    instruction matched by *insn_match*; unmatched instructions are kept
    unchanged."""
    from loopy.match import parse_match
    matches = parse_match(insn_match)

    new_insns = [
            f(insn) if matches(kernel, insn) else insn
            for insn in kernel.instructions]

    return kernel.copy(instructions=new_insns)
# }}}
# {{{ set_instruction_priority
@iterate_over_kernels_if_given_program
def set_instruction_priority(kernel, insn_match, priority):
    """Set the priority of instructions matching *insn_match* to *priority*.

    *insn_match* may be any instruction id match understood by
    :func:`loopy.match.parse_match`.
    """
    return map_instructions(
            kernel, insn_match,
            lambda insn: insn.copy(priority=priority))
# }}}
# {{{ add_dependency
@iterate_over_kernels_if_given_program
def add_dependency(kernel, insn_match, depends_on):
    """Add the instruction dependency *dependency* to the instructions matched
    by *insn_match*.

    *insn_match* and *depends_on* may be any instruction id match understood by
    :func:`loopy.match.parse_match`.

    .. versionchanged:: 2016.3

        Third argument renamed to *depends_on* for clarity, allowed to
        be not just ID but also match expression.
    """

    if isinstance(depends_on, str) and depends_on in kernel.id_to_insn:
        # fast path: *depends_on* is a literal instruction id
        added_deps = frozenset([depends_on])
    else:
        # otherwise treat *depends_on* as a match expression
        added_deps = frozenset(
                dep.id for dep in find_instructions_in_single_kernel(kernel,
                    depends_on))

    if not added_deps:
        raise LoopyError("no instructions found matching '%s' "
                "(to add as dependencies)" % depends_on)

    # One-element list used as a mutable cell so the closure below can
    # record whether it was ever invoked (py2-compatible stand-in for
    # 'nonlocal').
    matched = [False]

    def add_dep(insn):
        new_deps = insn.depends_on
        matched[0] = True
        if new_deps is None:
            new_deps = added_deps
        else:
            new_deps = new_deps | added_deps

        return insn.copy(depends_on=new_deps)

    result = map_instructions(kernel, insn_match, add_dep)

    # add_dep never ran => *insn_match* matched nothing; report that
    # rather than silently returning an unchanged kernel
    if not matched[0]:
        raise LoopyError("no instructions found matching '%s' "
                "(to which dependencies would be added)" % insn_match)

    return result
# }}}
# {{{ remove_instructions
def remove_instructions(kernel, insn_ids):
    """Return a new kernel with instructions in *insn_ids* removed.

    Dependencies across (one, for now) deleted isntructions are propagated.
    Behavior is undefined for now for chains of dependencies within the
    set of deleted instructions.

    This also updates *no_sync_with* for all instructions.
    """

    if not insn_ids:
        return kernel

    assert isinstance(insn_ids, set)
    id_to_insn = kernel.id_to_insn

    new_insns = []
    for insn in kernel.instructions:
        if insn.id in insn_ids:
            # this instruction is being deleted
            continue

        # transitively propagate dependencies
        # (only one level for now)
        if insn.depends_on is None:
            depends_on = frozenset()
        else:
            depends_on = insn.depends_on

        new_deps = depends_on - insn_ids

        # a dependency on a deleted instruction is replaced by that
        # instruction's own dependencies
        for dep_id in depends_on & insn_ids:
            new_deps = new_deps | id_to_insn[dep_id].depends_on

        # update no_sync_with: drop entries that reference deleted insns
        new_no_sync_with = frozenset((insn_id, scope)
                for insn_id, scope in insn.no_sync_with
                if insn_id not in insn_ids)

        new_insns.append(
                insn.copy(depends_on=new_deps, no_sync_with=new_no_sync_with))

    return kernel.copy(
            instructions=new_insns)
# {{{ replace_instruction_ids
def replace_instruction_ids(kernel, replacements):
    """Return a copy of *kernel* with instruction ids substituted.

    *replacements* maps an old instruction id to a sequence of replacement
    ids. Every occurrence of an old id in another instruction's
    *depends_on* or *no_sync_with* is expanded into the replacement ids.
    Instructions that reference no replaced id are kept as-is (identical
    objects).
    """
    updated_insns = []

    for insn in kernel.instructions:
        modified = False
        expanded_deps = []
        expanded_nosync = []

        for dep_id in insn.depends_on:
            replacement = replacements.get(dep_id)
            if replacement is None:
                expanded_deps.append(dep_id)
            else:
                expanded_deps.extend(replacement)
                modified = True

        for other_id, scope in insn.no_sync_with:
            replacement = replacements.get(other_id)
            if replacement is None:
                expanded_nosync.append((other_id, scope))
            else:
                expanded_nosync.extend(
                        (new_id, scope) for new_id in replacement)
                modified = True

        if modified:
            updated_insns.append(insn.copy(
                    depends_on=frozenset(expanded_deps),
                    no_sync_with=frozenset(expanded_nosync)))
        else:
            updated_insns.append(insn)

    return kernel.copy(instructions=updated_insns)
# }}}
# {{{ tag_instructions
@iterate_over_kernels_if_given_program
def tag_instructions(kernel, new_tag, within=None):
    """Return *kernel* with *new_tag* added to every instruction
    matching the *within* match expression (all instructions if *None*).
    Non-matching instructions are kept unchanged.
    """
    from loopy.match import parse_match
    match = parse_match(within)

    extra_tags = frozenset([new_tag])
    new_insns = [
            insn.copy(tags=insn.tags | extra_tags)
            if match(kernel, insn) else insn
            for insn in kernel.instructions]

    return kernel.copy(instructions=new_insns)
# }}}
# {{{ add nosync
@iterate_over_kernels_if_given_program
def add_nosync(kernel, scope, source, sink, bidirectional=False, force=False,
        empty_ok=False):
    """Add a *no_sync_with* directive between *source* and *sink*.
    *no_sync_with* is only added if *sink* depends on *source* or
    if the instruction pair is in a conflicting group.

    This function does not check for the presence of a memory dependency.

    :arg kernel: The kernel
    :arg source: Either a single instruction id, or any instruction id
        match understood by :func:`loopy.match.parse_match`.
    :arg sink: Either a single instruction id, or any instruction id
        match understood by :func:`loopy.match.parse_match`.
    :arg scope: A valid *no_sync_with* scope. See
        :attr:`loopy.InstructionBase.no_sync_with` for allowable scopes.
    :arg bidirectional: A :class:`bool`. If *True*, add a *no_sync_with*
        to both the source and sink instructions, otherwise the directive
        is only added to the sink instructions.
    :arg force: A :class:`bool`. If *True*, add a *no_sync_with* directive
        even without the presence of a dependency edge or conflicting
        instruction group.
    :arg empty_ok: If *True*, do not complain even if no *nosync* tags were
        added as a result of the transformation.

    :return: The updated kernel

    .. versionchanged:: 2018.1

        If the transformation adds no *nosync* directives, it will complain.
        This used to silently pass. This behavior can be restored using
        *empty_ok*.
    """
    assert isinstance(kernel, LoopKernel)

    # Resolve *source* to a set of instruction ids: either a literal id
    # or a match expression.
    if isinstance(source, str) and source in kernel.id_to_insn:
        sources = frozenset([source])
    else:
        sources = frozenset(
                source.id for source in find_instructions_in_single_kernel(
                    kernel, source))

    # Same resolution for *sink*.
    if isinstance(sink, str) and sink in kernel.id_to_insn:
        sinks = frozenset([sink])
    else:
        sinks = frozenset(
                sink.id for sink in find_instructions_in_single_kernel(
                    kernel, sink))

    if not sources and not empty_ok:
        raise LoopyError("No match found for source specification '%s'." % source)
    if not sinks and not empty_ok:
        raise LoopyError("No match found for sink specification '%s'." % sink)

    def insns_in_conflicting_groups(insn1_id, insn2_id):
        # True if either instruction's groups intersect the other's
        # conflicts_with_groups.
        insn1 = kernel.id_to_insn[insn1_id]
        insn2 = kernel.id_to_insn[insn2_id]
        return (
                bool(insn1.groups & insn2.conflicts_with_groups)
                or
                bool(insn2.groups & insn1.conflicts_with_groups))

    from collections import defaultdict
    nosync_to_add = defaultdict(set)

    # recursive_insn_dep_map gives, per instruction, all transitive deps.
    rec_dep_map = kernel.recursive_insn_dep_map()
    for sink in sinks:
        for source in sources:
            needs_nosync = force or (
                    source in rec_dep_map[sink]
                    or insns_in_conflicting_groups(source, sink))
            if not needs_nosync:
                continue

            nosync_to_add[sink].add((source, scope))
            if bidirectional:
                nosync_to_add[source].add((sink, scope))

    if not nosync_to_add and not empty_ok:
        # (fixed typo in the original message: "depencies")
        raise LoopyError("No nosync annotations were added as a result "
                "of this call. add_nosync will (by default) only add them to "
                "accompany existing dependencies or group exclusions. Maybe you want "
                "to pass force=True?")

    new_instructions = list(kernel.instructions)

    for i, insn in enumerate(new_instructions):
        if insn.id in nosync_to_add:
            new_instructions[i] = insn.copy(no_sync_with=insn.no_sync_with
                    | frozenset(nosync_to_add[insn.id]))

    return kernel.copy(instructions=new_instructions)
# }}}
# {{{ uniquify_instruction_ids
@iterate_over_kernels_if_given_program
def uniquify_instruction_ids(kernel):
    """Converts any ids that are :class:`loopy.UniqueName` or *None* into unique
    strings.

    This function does *not* deduplicate existing instruction ids.
    """
    from loopy.kernel.creation import UniqueName
    from pytools import UniqueNameGenerator

    # Ids already fixed as plain strings are reserved so newly generated
    # names cannot collide with them.
    taken_ids = set(
            insn.id for insn in kernel.instructions
            if insn.id is not None and not isinstance(insn.id, UniqueName))
    name_gen = UniqueNameGenerator(taken_ids)

    def _with_unique_id(insn):
        if insn.id is None:
            return insn.copy(id=name_gen("insn"))
        if isinstance(insn.id, UniqueName):
            return insn.copy(id=name_gen(insn.id.name))
        return insn

    return kernel.copy(
            instructions=[_with_unique_id(insn) for insn in kernel.instructions])
# }}}
# vim: foldmethod=marker
| 31.366667 | 83 | 0.666721 | from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2012 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
from loopy.diagnostic import LoopyError
from loopy.kernel import LoopKernel
from loopy.kernel.function_interface import (ScalarCallable, CallableKernel)
from loopy.program import Program, iterate_over_kernels_if_given_program
def find_instructions_in_single_kernel(kernel, insn_match):
assert isinstance(kernel, LoopKernel)
from loopy.match import parse_match
match = parse_match(insn_match)
return [insn for insn in kernel.instructions if match(kernel, insn)]
def find_instructions(program, insn_match):
assert isinstance(program, Program)
insns = []
for in_knl_callable in program.callables_table.values():
if isinstance(in_knl_callable, CallableKernel):
insns += (find_instructions_in_single_kernel(
in_knl_callable.subkernel, insn_match))
elif isinstance(in_knl_callable, ScalarCallable):
pass
else:
raise NotImplementedError("Unknown callable type %s." % (
type(in_knl_callable)))
return insns
def map_instructions(kernel, insn_match, f):
from loopy.match import parse_match
match = parse_match(insn_match)
new_insns = []
for insn in kernel.instructions:
if match(kernel, insn):
new_insns.append(f(insn))
else:
new_insns.append(insn)
return kernel.copy(instructions=new_insns)
@iterate_over_kernels_if_given_program
def set_instruction_priority(kernel, insn_match, priority):
def set_prio(insn):
return insn.copy(priority=priority)
return map_instructions(kernel, insn_match, set_prio)
@iterate_over_kernels_if_given_program
def add_dependency(kernel, insn_match, depends_on):
if isinstance(depends_on, str) and depends_on in kernel.id_to_insn:
added_deps = frozenset([depends_on])
else:
added_deps = frozenset(
dep.id for dep in find_instructions_in_single_kernel(kernel,
depends_on))
if not added_deps:
raise LoopyError("no instructions found matching '%s' "
"(to add as dependencies)" % depends_on)
matched = [False]
def add_dep(insn):
new_deps = insn.depends_on
matched[0] = True
if new_deps is None:
new_deps = added_deps
else:
new_deps = new_deps | added_deps
return insn.copy(depends_on=new_deps)
result = map_instructions(kernel, insn_match, add_dep)
if not matched[0]:
raise LoopyError("no instructions found matching '%s' "
"(to which dependencies would be added)" % insn_match)
return result
def remove_instructions(kernel, insn_ids):
if not insn_ids:
return kernel
assert isinstance(insn_ids, set)
id_to_insn = kernel.id_to_insn
new_insns = []
for insn in kernel.instructions:
if insn.id in insn_ids:
continue
if insn.depends_on is None:
depends_on = frozenset()
else:
depends_on = insn.depends_on
new_deps = depends_on - insn_ids
for dep_id in depends_on & insn_ids:
new_deps = new_deps | id_to_insn[dep_id].depends_on
new_no_sync_with = frozenset((insn_id, scope)
for insn_id, scope in insn.no_sync_with
if insn_id not in insn_ids)
new_insns.append(
insn.copy(depends_on=new_deps, no_sync_with=new_no_sync_with))
return kernel.copy(
instructions=new_insns)
def replace_instruction_ids(kernel, replacements):
new_insns = []
for insn in kernel.instructions:
changed = False
new_depends_on = []
new_no_sync_with = []
for dep in insn.depends_on:
if dep in replacements:
new_depends_on.extend(replacements[dep])
changed = True
else:
new_depends_on.append(dep)
for insn_id, scope in insn.no_sync_with:
if insn_id in replacements:
new_no_sync_with.extend(
(repl, scope) for repl in replacements[insn_id])
changed = True
else:
new_no_sync_with.append((insn_id, scope))
new_insns.append(
insn.copy(
depends_on=frozenset(new_depends_on),
no_sync_with=frozenset(new_no_sync_with))
if changed else insn)
return kernel.copy(instructions=new_insns)
@iterate_over_kernels_if_given_program
def tag_instructions(kernel, new_tag, within=None):
from loopy.match import parse_match
within = parse_match(within)
new_insns = []
for insn in kernel.instructions:
if within(kernel, insn):
new_insns.append(
insn.copy(tags=insn.tags | frozenset([new_tag])))
else:
new_insns.append(insn)
return kernel.copy(instructions=new_insns)
@iterate_over_kernels_if_given_program
def add_nosync(kernel, scope, source, sink, bidirectional=False, force=False,
empty_ok=False):
assert isinstance(kernel, LoopKernel)
if isinstance(source, str) and source in kernel.id_to_insn:
sources = frozenset([source])
else:
sources = frozenset(
source.id for source in find_instructions_in_single_kernel(
kernel, source))
if isinstance(sink, str) and sink in kernel.id_to_insn:
sinks = frozenset([sink])
else:
sinks = frozenset(
sink.id for sink in find_instructions_in_single_kernel(
kernel, sink))
if not sources and not empty_ok:
raise LoopyError("No match found for source specification '%s'." % source)
if not sinks and not empty_ok:
raise LoopyError("No match found for sink specification '%s'." % sink)
def insns_in_conflicting_groups(insn1_id, insn2_id):
insn1 = kernel.id_to_insn[insn1_id]
insn2 = kernel.id_to_insn[insn2_id]
return (
bool(insn1.groups & insn2.conflicts_with_groups)
or
bool(insn2.groups & insn1.conflicts_with_groups))
from collections import defaultdict
nosync_to_add = defaultdict(set)
rec_dep_map = kernel.recursive_insn_dep_map()
for sink in sinks:
for source in sources:
needs_nosync = force or (
source in rec_dep_map[sink]
or insns_in_conflicting_groups(source, sink))
if not needs_nosync:
continue
nosync_to_add[sink].add((source, scope))
if bidirectional:
nosync_to_add[source].add((sink, scope))
if not nosync_to_add and not empty_ok:
raise LoopyError("No nosync annotations were added as a result "
"of this call. add_nosync will (by default) only add them to "
"accompany existing depencies or group exclusions. Maybe you want "
"to pass force=True?")
new_instructions = list(kernel.instructions)
for i, insn in enumerate(new_instructions):
if insn.id in nosync_to_add:
new_instructions[i] = insn.copy(no_sync_with=insn.no_sync_with
| frozenset(nosync_to_add[insn.id]))
return kernel.copy(instructions=new_instructions)
@iterate_over_kernels_if_given_program
def uniquify_instruction_ids(kernel):
from loopy.kernel.creation import UniqueName
insn_ids = set(
insn.id for insn in kernel.instructions
if insn.id is not None and not isinstance(insn.id, UniqueName))
from pytools import UniqueNameGenerator
insn_id_gen = UniqueNameGenerator(insn_ids)
new_instructions = []
for insn in kernel.instructions:
if insn.id is None:
new_instructions.append(
insn.copy(id=insn_id_gen("insn")))
elif isinstance(insn.id, UniqueName):
new_instructions.append(
insn.copy(id=insn_id_gen(insn.id.name)))
else:
new_instructions.append(insn)
return kernel.copy(instructions=new_instructions)
| true | true |
f731121b0a99cb58789c4b1de9a36c3004b171ee | 442 | py | Python | Inventory/views.py | DivyaKarunakaran/Inventory | ac883f087a5204832349e0fe3ed3692bc3d413c2 | [
"bzip2-1.0.6"
] | null | null | null | Inventory/views.py | DivyaKarunakaran/Inventory | ac883f087a5204832349e0fe3ed3692bc3d413c2 | [
"bzip2-1.0.6"
] | null | null | null | Inventory/views.py | DivyaKarunakaran/Inventory | ac883f087a5204832349e0fe3ed3692bc3d413c2 | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render
from django.http import Http404
from Inventory.models import Item
def index(request):
items=Item.objects.exclude(amount=0)
return render(request,'Inventory/index.html',{
'items':items,})
def item_detail(request, id):
try:
item=Item.objects.get(id=id)
except Item.DoesNotExist:
raise Http404('This item does not exist')
return render(request,'Inventory/item_detail.html',{
'item':item,
})
| 24.555556 | 53 | 0.751131 | from django.shortcuts import render
from django.http import Http404
from Inventory.models import Item
def index(request):
items=Item.objects.exclude(amount=0)
return render(request,'Inventory/index.html',{
'items':items,})
def item_detail(request, id):
try:
item=Item.objects.get(id=id)
except Item.DoesNotExist:
raise Http404('This item does not exist')
return render(request,'Inventory/item_detail.html',{
'item':item,
})
| true | true |
f731144c606f8f088d759bd0b3022acc7a14317e | 6,296 | py | Python | ckan/lib/formatters.py | okfde/ckankrzn | df4c1ed624f6751ac2a8f03527ff19e448d27dfb | [
"Apache-2.0"
] | 4 | 2017-06-12T15:18:30.000Z | 2019-10-11T15:12:43.000Z | ckan/lib/formatters.py | okfde/ckankrzn | df4c1ed624f6751ac2a8f03527ff19e448d27dfb | [
"Apache-2.0"
] | 64 | 2017-05-14T22:15:53.000Z | 2020-03-08T15:26:49.000Z | ckan/lib/formatters.py | okfde/ckankrzn | df4c1ed624f6751ac2a8f03527ff19e448d27dfb | [
"Apache-2.0"
] | 2 | 2018-09-08T08:02:25.000Z | 2020-04-24T13:02:06.000Z | # encoding: utf-8
import datetime
import pytz
from babel import numbers
import ckan.lib.i18n as i18n
from ckan.common import _, ungettext
##################################################
# #
# Month translations #
# #
##################################################
# Each helper returns the translated name of one month. The month names are
# kept as literal arguments to _() so gettext-based message extraction can
# find them; do not replace these functions with a programmatic loop.
def _month_jan():
    return _('January')


def _month_feb():
    return _('February')


def _month_mar():
    return _('March')


def _month_apr():
    return _('April')


def _month_may():
    return _('May')


def _month_june():
    return _('June')


def _month_july():
    return _('July')


def _month_aug():
    return _('August')


def _month_sept():
    return _('September')


def _month_oct():
    return _('October')


def _month_nov():
    return _('November')


def _month_dec():
    return _('December')


# _MONTH_FUNCTIONS provides an easy way to get a localised month via
# _MONTH_FUNCTIONS[month]() where months are zero based ie jan = 0, dec = 11
_MONTH_FUNCTIONS = [_month_jan, _month_feb, _month_mar, _month_apr,
                    _month_may, _month_june, _month_july, _month_aug,
                    _month_sept, _month_oct, _month_nov, _month_dec]
def localised_nice_date(datetime_, show_date=False, with_hours=False):
    ''' Returns a friendly localised unicode representation of a datetime.

    :param datetime_: The date to format
    :type datetime_: datetime
    :param show_date: Show date not 2 days ago etc
    :type show_date: bool
    :param with_hours: should the `hours:mins` be shown for dates
    :type with_hours: bool

    :rtype: string
    '''
    def months_between(date1, date2):
        # Number of whole calendar months between the two datetimes.
        if date1 > date2:
            date1, date2 = date2, date1
        m1 = date1.year * 12 + date1.month
        m2 = date2.year * 12 + date2.month
        months = m2 - m1
        if date1.day > date2.day:
            months -= 1
        elif date1.day == date2.day:
            # Same day-of-month: compare the time of day in seconds.
            # (bugfix: minutes were previously added unscaled, without the
            # * 60 factor, skewing the comparison against hours/seconds)
            seconds1 = date1.hour * 3600 + date1.minute * 60 + date1.second
            seconds2 = date2.hour * 3600 + date2.minute * 60 + date2.second
            if seconds1 > seconds2:
                months -= 1
        return months

    if not show_date:
        now = datetime.datetime.utcnow()
        if datetime_.tzinfo is not None:
            # NOTE(review): attaching the input's tzinfo to a UTC wall-clock
            # reading is only correct for UTC-like zones -- confirm intent.
            now = now.replace(tzinfo=datetime_.tzinfo)
        else:
            # Treat naive datetimes as UTC so the subtraction is well-defined.
            now = now.replace(tzinfo=pytz.utc)
            datetime_ = datetime_.replace(tzinfo=pytz.utc)
        date_diff = now - datetime_
        days = date_diff.days
        if days < 1 and now > datetime_:
            # less than one day
            seconds = date_diff.seconds
            if seconds < 3600:
                # less than one hour
                if seconds < 60:
                    return _('Just now')
                else:
                    # Floor division so Python 3 does not yield fractional
                    # minute counts (this file otherwise relied on Python 2
                    # integer division).
                    return ungettext('{mins} minute ago', '{mins} minutes ago',
                                     seconds // 60).format(mins=seconds // 60)
            else:
                return ungettext('{hours} hour ago', '{hours} hours ago',
                                 seconds // 3600).format(hours=seconds // 3600)
        # more than one day
        months = months_between(datetime_, now)

        if months < 1:
            return ungettext('{days} day ago', '{days} days ago',
                             days).format(days=days)

        if months < 13:
            return ungettext('{months} month ago', '{months} months ago',
                             months).format(months=months)

        return ungettext('over {years} year ago', 'over {years} years ago',
                         months // 12).format(years=months // 12)
    # actual date
    details = {
        'min': datetime_.minute,
        'hour': datetime_.hour,
        'day': datetime_.day,
        'year': datetime_.year,
        'month': _MONTH_FUNCTIONS[datetime_.month - 1](),
        'timezone': datetime_.tzname(),
    }
    if with_hours:
        return (
            # NOTE: This is for translating dates like `April 24, 2013, 10:45 (Europe/Zurich)`
            _('{month} {day}, {year}, {hour:02}:{min:02} ({timezone})') \
            .format(**details))
    else:
        return (
            # NOTE: This is for translating dates like `April 24, 2013`
            _('{month} {day}, {year}').format(**details))
def localised_number(number):
    ''' Returns a localised unicode representation of number '''
    current_locale = i18n.get_lang()
    return numbers.format_number(number, locale=current_locale)
def localised_filesize(number):
    ''' Returns a localised unicode representation of a number in bytes, MiB
    etc '''
    def one_decimal(value, divisor):
        # Keep a single decimal place (same rounding as the other helpers).
        return localised_number(float(value * 10 / divisor) / 10)

    # Check the largest unit first; the bands are identical to an
    # ascending < comparison chain.
    if number >= 1024 ** 4:
        return _('{tebibytes} TiB').format(tebibytes=one_decimal(number, 1024 ** 4))
    if number >= 1024 ** 3:
        return _('{gibibytes} GiB').format(gibibytes=one_decimal(number, 1024 ** 3))
    if number >= 1024 ** 2:
        return _('{mebibytes} MiB').format(mebibytes=one_decimal(number, 1024 ** 2))
    if number >= 1024:
        return _('{kibibytes} KiB').format(kibibytes=one_decimal(number, 1024))
    return _('{bytes} bytes').format(bytes=localised_number(number))
def localised_SI_number(number):
    ''' Returns a localised unicode representation of a number in SI format
    eg 14700 becomes 14.7k '''
    def one_decimal(value, divisor):
        # Keep a single decimal place (same rounding as the other helpers).
        return localised_number(float(value * 10 / divisor) / 10)

    # Check the largest SI prefix first; the bands are identical to an
    # ascending < comparison chain.
    if number >= 1000 ** 8:
        return _('{y}Y').format(y=one_decimal(number, 1000 ** 8))
    if number >= 1000 ** 7:
        return _('{z}Z').format(z=one_decimal(number, 1000 ** 7))
    if number >= 1000 ** 6:
        return _('{e}E').format(e=one_decimal(number, 1000 ** 6))
    if number >= 1000 ** 5:
        return _('{p}P').format(p=one_decimal(number, 1000 ** 5))
    if number >= 1000 ** 4:
        return _('{t}T').format(t=one_decimal(number, 1000 ** 4))
    if number >= 1000 ** 3:
        return _('{g}G').format(g=one_decimal(number, 1000 ** 3))
    if number >= 1000 ** 2:
        return _('{m}M').format(m=one_decimal(number, 1000 ** 2))
    if number >= 1000:
        return _('{k}k').format(k=one_decimal(number, 1000))
    return _('{n}').format(n=localised_number(number))
| 30.563107 | 94 | 0.560038 |
import datetime
import pytz
from babel import numbers
import ckan.lib.i18n as i18n
from ckan.common import _, ungettext
ys)
if months < 13:
return ungettext('{months} month ago', '{months} months ago',
months).format(months=months)
return ungettext('over {years} year ago', 'over {years} years ago',
months / 12).format(years=months / 12)
details = {
'min': datetime_.minute,
'hour': datetime_.hour,
'day': datetime_.day,
'year': datetime_.year,
'month': _MONTH_FUNCTIONS[datetime_.month - 1](),
'timezone': datetime_.tzname(),
}
if with_hours:
return (
_('{month} {day}, {year}, {hour:02}:{min:02} ({timezone})') \
.format(**details))
else:
return (
_('{month} {day}, {year}').format(**details))
def localised_number(number):
return numbers.format_number(number, locale=i18n.get_lang())
def localised_filesize(number):
def rnd(number, divisor):
return localised_number(float(number * 10 / divisor) / 10)
if number < 1024:
return _('{bytes} bytes').format(bytes=localised_number(number))
elif number < 1024 ** 2:
return _('{kibibytes} KiB').format(kibibytes=rnd(number, 1024))
elif number < 1024 ** 3:
return _('{mebibytes} MiB').format(mebibytes=rnd(number, 1024 ** 2))
elif number < 1024 ** 4:
return _('{gibibytes} GiB').format(gibibytes=rnd(number, 1024 ** 3))
else:
return _('{tebibytes} TiB').format(tebibytes=rnd(number, 1024 ** 4))
def localised_SI_number(number):
def rnd(number, divisor):
return localised_number(float(number * 10 / divisor) / 10)
if number < 1000:
return _('{n}').format(n=localised_number(number))
elif number < 1000 ** 2:
return _('{k}k').format(k=rnd(number, 1000))
elif number < 1000 ** 3:
return _('{m}M').format(m=rnd(number, 1000 ** 2))
elif number < 1000 ** 4:
return _('{g}G').format(g=rnd(number, 1000 ** 3))
elif number < 1000 ** 5:
return _('{t}T').format(t=rnd(number, 1000 ** 4))
elif number < 1000 ** 6:
return _('{p}P').format(p=rnd(number, 1000 ** 5))
elif number < 1000 ** 7:
return _('{e}E').format(e=rnd(number, 1000 ** 6))
elif number < 1000 ** 8:
return _('{z}Z').format(z=rnd(number, 1000 ** 7))
else:
return _('{y}Y').format(y=rnd(number, 1000 ** 8))
| true | true |
f731159d8d119b22890a43bc26246da1964a17db | 3,103 | py | Python | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayOpenMiniAmpeTracerSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniAmpeTracerSyncModel(object):
    """Parameter model for the alipay.open.mini.ampe.tracer.sync call.

    Holds a device id, a product id and four SPM tracking segments
    (spm_a .. spm_d), with helpers that convert to and from the plain
    dict shape used by the Alipay gateway.
    """

    # Attribute names handled by to_alipay_dict / from_alipay_dict.
    _FIELD_NAMES = ('device_id', 'product_id', 'spm_a', 'spm_b',
                    'spm_c', 'spm_d')

    def __init__(self):
        self._device_id = None
        self._product_id = None
        self._spm_a = None
        self._spm_b = None
        self._spm_c = None
        self._spm_d = None

    @property
    def device_id(self):
        return self._device_id

    @device_id.setter
    def device_id(self, value):
        self._device_id = value

    @property
    def product_id(self):
        return self._product_id

    @product_id.setter
    def product_id(self, value):
        self._product_id = value

    @property
    def spm_a(self):
        return self._spm_a

    @spm_a.setter
    def spm_a(self, value):
        self._spm_a = value

    @property
    def spm_b(self):
        return self._spm_b

    @spm_b.setter
    def spm_b(self, value):
        self._spm_b = value

    @property
    def spm_c(self):
        return self._spm_c

    @spm_c.setter
    def spm_c(self, value):
        self._spm_c = value

    @property
    def spm_d(self):
        return self._spm_d

    @spm_d.setter
    def spm_d(self, value):
        self._spm_d = value

    def to_alipay_dict(self):
        """Serialize every non-empty field into a plain dict."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            # Falsy values (None, '') are skipped, as in every SDK model.
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; returns None for empty input."""
        if not d:
            return None
        o = AlipayOpenMiniAmpeTracerSyncModel()
        for name in AlipayOpenMiniAmpeTracerSyncModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| 26.75 | 71 | 0.548179 |
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOpenMiniAmpeTracerSyncModel(object):
def __init__(self):
self._device_id = None
self._product_id = None
self._spm_a = None
self._spm_b = None
self._spm_c = None
self._spm_d = None
@property
def device_id(self):
return self._device_id
@device_id.setter
def device_id(self, value):
self._device_id = value
@property
def product_id(self):
return self._product_id
@product_id.setter
def product_id(self, value):
self._product_id = value
@property
def spm_a(self):
return self._spm_a
@spm_a.setter
def spm_a(self, value):
self._spm_a = value
@property
def spm_b(self):
return self._spm_b
@spm_b.setter
def spm_b(self, value):
self._spm_b = value
@property
def spm_c(self):
return self._spm_c
@spm_c.setter
def spm_c(self, value):
self._spm_c = value
@property
def spm_d(self):
return self._spm_d
@spm_d.setter
def spm_d(self, value):
self._spm_d = value
def to_alipay_dict(self):
params = dict()
if self.device_id:
if hasattr(self.device_id, 'to_alipay_dict'):
params['device_id'] = self.device_id.to_alipay_dict()
else:
params['device_id'] = self.device_id
if self.product_id:
if hasattr(self.product_id, 'to_alipay_dict'):
params['product_id'] = self.product_id.to_alipay_dict()
else:
params['product_id'] = self.product_id
if self.spm_a:
if hasattr(self.spm_a, 'to_alipay_dict'):
params['spm_a'] = self.spm_a.to_alipay_dict()
else:
params['spm_a'] = self.spm_a
if self.spm_b:
if hasattr(self.spm_b, 'to_alipay_dict'):
params['spm_b'] = self.spm_b.to_alipay_dict()
else:
params['spm_b'] = self.spm_b
if self.spm_c:
if hasattr(self.spm_c, 'to_alipay_dict'):
params['spm_c'] = self.spm_c.to_alipay_dict()
else:
params['spm_c'] = self.spm_c
if self.spm_d:
if hasattr(self.spm_d, 'to_alipay_dict'):
params['spm_d'] = self.spm_d.to_alipay_dict()
else:
params['spm_d'] = self.spm_d
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniAmpeTracerSyncModel()
if 'device_id' in d:
o.device_id = d['device_id']
if 'product_id' in d:
o.product_id = d['product_id']
if 'spm_a' in d:
o.spm_a = d['spm_a']
if 'spm_b' in d:
o.spm_b = d['spm_b']
if 'spm_c' in d:
o.spm_c = d['spm_c']
if 'spm_d' in d:
o.spm_d = d['spm_d']
return o
| true | true |
f7311738dee08b938940c7168a1cd11f92e41b95 | 338 | py | Python | cn/opencv/chapter01/01Read.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | null | null | null | cn/opencv/chapter01/01Read.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | null | null | null | cn/opencv/chapter01/01Read.py | Jasonandy/Python-X | 2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe | [
"Apache-2.0"
] | 2 | 2019-06-18T05:53:26.000Z | 2019-06-19T03:26:02.000Z | """
read picture
"""
import cv2
def read_picture(path):
    """Load the image at *path* and display it until any key is pressed.

    :param path: filesystem path of the image to show
    :return: None
    """
    window_name = "OPEN_CV_READ_IMG"
    image = cv2.imread(path)
    cv2.namedWindow(window_name)
    cv2.imshow(window_name, image)
    # Block until a key press, then tear down all OpenCV windows.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Demo entry point: show the bundled Lena test image.
    image_path = "../media/lena/lena.jpg"
    read_picture(image_path)
| 15.363636 | 39 | 0.615385 | import cv2
def read_picture(path):
img = cv2.imread(path)
cv2.namedWindow("OPEN_CV_READ_IMG")
cv2.imshow("OPEN_CV_READ_IMG", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == '__main__':
path = "../media/lena/lena.jpg"
read_picture(path)
| true | true |
f73117825794ffc13c74bb610938ec6e7e2bbad0 | 1,828 | py | Python | pymenu/elements.py | feftio/pymenu | 846b916ce55548c53f43f3642d69e6c64fd9d774 | [
"MIT"
] | null | null | null | pymenu/elements.py | feftio/pymenu | 846b916ce55548c53f43f3642d69e6c64fd9d774 | [
"MIT"
] | null | null | null | pymenu/elements.py | feftio/pymenu | 846b916ce55548c53f43f3642d69e6c64fd9d774 | [
"MIT"
] | null | null | null | from __future__ import annotations
import typing as t
from abc import ABC, abstractmethod
from pymenu.listener import GroupListener, Listener, ListenerInterface
from pymenu.triggers import Trigger
from rich import print
class ElementInterface(ListenerInterface, ABC):
@abstractmethod
def render(self) -> None:
pass
class Element(ElementInterface, Listener):
pass
class GroupElement(ElementInterface, GroupListener):
def __init__(self, childs: t.Tuple[ElementInterface]):
self.childs = childs
class Group(GroupElement):
def __init__(self, *childs: t.Tuple[ElementInterface]):
super().__init__(childs)
def render(self) -> None:
for element in self.childs:
element.render()
class Item(Element):
# TODO: make "label" optional.
def __init__(self, label: t.Optional[str] = None, action: t.Optional[t.Callable] = None, triggers: t.Optional[Trigger] = None):
self.label: t.Optional[str] = label
self.listener(action, triggers)
def render(self) -> None:
if self.label is None:
return
print(self.label)
class Hidden(Element):
def __init__(self, action: t.Optional[t.Callable] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
self.listener(action, triggers)
def render(self) -> None:
pass
class Redirect(Item):
def __init__(self, to: str, label: t.Optional[str] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
super().__init__(label, self._action, triggers)
self.to = to
def _action(self) -> None:
pass
class Back(Item):
def __init__(self, label: t.Optional[str] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
super().__init__(label, self._action, triggers)
def _action(self) -> None:
pass
| 26.882353 | 131 | 0.669037 | from __future__ import annotations
import typing as t
from abc import ABC, abstractmethod
from pymenu.listener import GroupListener, Listener, ListenerInterface
from pymenu.triggers import Trigger
from rich import print
class ElementInterface(ListenerInterface, ABC):
@abstractmethod
def render(self) -> None:
pass
class Element(ElementInterface, Listener):
pass
class GroupElement(ElementInterface, GroupListener):
def __init__(self, childs: t.Tuple[ElementInterface]):
self.childs = childs
class Group(GroupElement):
def __init__(self, *childs: t.Tuple[ElementInterface]):
super().__init__(childs)
def render(self) -> None:
for element in self.childs:
element.render()
class Item(Element):
def __init__(self, label: t.Optional[str] = None, action: t.Optional[t.Callable] = None, triggers: t.Optional[Trigger] = None):
self.label: t.Optional[str] = label
self.listener(action, triggers)
def render(self) -> None:
if self.label is None:
return
print(self.label)
class Hidden(Element):
def __init__(self, action: t.Optional[t.Callable] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
self.listener(action, triggers)
def render(self) -> None:
pass
class Redirect(Item):
def __init__(self, to: str, label: t.Optional[str] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
super().__init__(label, self._action, triggers)
self.to = to
def _action(self) -> None:
pass
class Back(Item):
def __init__(self, label: t.Optional[str] = None, triggers: t.Optional[t.Tuple[Trigger]] = None):
super().__init__(label, self._action, triggers)
def _action(self) -> None:
pass
| true | true |
f7311956fa5e3a221660dc56d6dcaaef28e32f55 | 1,662 | py | Python | tests/test_calculator.py | amard33p/minimal-pytest-project | 45844a70d8c8a6499a038a6eb99bf7b7b78ccdd8 | [
"Apache-2.0"
] | 2 | 2020-12-08T14:35:19.000Z | 2022-01-18T21:35:14.000Z | tests/test_calculator.py | amard33p/minimal-pytest-project | 45844a70d8c8a6499a038a6eb99bf7b7b78ccdd8 | [
"Apache-2.0"
] | null | null | null | tests/test_calculator.py | amard33p/minimal-pytest-project | 45844a70d8c8a6499a038a6eb99bf7b7b78ccdd8 | [
"Apache-2.0"
] | 1 | 2021-09-13T12:36:32.000Z | 2021-09-13T12:36:32.000Z | import pytest
import mymath.calculator
from mymath.calculator import add, div, filesum, fileconcat, approx_eq
# Simple tests
# ----------------------------------------------------
def test_add():
    # add() should return the arithmetic sum of its two operands.
    assert add(1, 2) == 3
def test_div():
    # Ordinary division, including a zero numerator.
    assert div(4, 2) == 2
    assert div(0, 2) == 0
# Catching exceptions
# ------------------------------------------------------------------------------
def test_div_by_zero():
    # A zero divisor must raise ValueError with the exact documented message.
    with pytest.raises(ValueError) as ex:
        div(1, 0)
    assert str(ex.value) == 'Cannot divide by zero!'
# Tests organized in class
# ------------------------------------------------------------------------------
class TestCalculator:
    """Same add/div checks as the module-level tests, grouped in a class."""
    def test_add(self):
        assert add(1, 2) == 3
    def test_add_zero(self):
        # Zero is the additive identity on either side.
        assert add(0, 0) == 0
        assert add(1, 0) == 1
        assert add(0, 2) == 2
    def test_div(self):
        assert div(4, 2) == 2
# Fixtures
# ------------------------------------------------------------------------------
@pytest.fixture(scope="function")
def numbers_file():
    """Yield an open handle to tests/data/numbers.txt, closed after the test.

    BUGFIX: the original defined a ``fin()`` finalizer but never registered
    it (``request.addfinalizer`` was missing), so the file handle leaked.
    A yield-fixture makes pytest run the cleanup automatically.
    """
    f = open("tests/data/numbers.txt")
    yield f
    f.close()
def test_filesum(numbers_file):
    # Fixture file presumably holds the numbers 1..3; filesum adds them.
    assert filesum(numbers_file) == 6
def test_fileconcat(numbers_file):
    # Same fixture digits concatenated as an integer: 1,2,3 -> 123.
    assert fileconcat(numbers_file) == 123
# Monkey patching, Mocking
# ------------------------------------------------------------------------------
def test_approx_eq(monkeypatch):
    # Replacement epsilon function: makes any pair within 2 "approximately equal".
    def mock_eps(machine):
        return 2
    assert approx_eq(1, 1)
    #monkeypatch.setattr(mymath.calculator, 'eps', mock_eps)
    # Patch by dotted path so the module attribute itself is replaced.
    monkeypatch.setattr('mymath.calculator.eps', mock_eps)
    assert approx_eq(1, 2)
| 23.742857 | 80 | 0.493983 | import pytest
import mymath.calculator
from mymath.calculator import add, div, filesum, fileconcat, approx_eq
def test_add():
assert add(1, 2) == 3
def test_div():
assert div(4, 2) == 2
assert div(0, 2) == 0
def test_div_by_zero():
with pytest.raises(ValueError) as ex:
div(1, 0)
assert str(ex.value) == 'Cannot divide by zero!'
class TestCalculator:
def test_add(self):
assert add(1, 2) == 3
def test_add_zero(self):
assert add(0, 0) == 0
assert add(1, 0) == 1
assert add(0, 2) == 2
def test_div(self):
assert div(4, 2) == 2
@pytest.fixture(scope="function")
def numbers_file():
f = open("tests/data/numbers.txt")
def fin():
f.close()
return f
def test_filesum(numbers_file):
assert filesum(numbers_file) == 6
def test_fileconcat(numbers_file):
assert fileconcat(numbers_file) == 123
def test_approx_eq(monkeypatch):
def mock_eps(machine):
return 2
assert approx_eq(1, 1)
monkeypatch.setattr('mymath.calculator.eps', mock_eps)
assert approx_eq(1, 2)
| true | true |
f731198351b74c2e46960cec248a5589d6459d40 | 18,893 | py | Python | redis_in_action/redis_action_ch05.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 1 | 2018-12-19T22:07:56.000Z | 2018-12-19T22:07:56.000Z | redis_in_action/redis_action_ch05.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 12 | 2020-03-14T05:32:26.000Z | 2022-03-12T00:08:49.000Z | redis_in_action/redis_action_ch05.py | ftconan/python3 | eb63ba33960072f792ecce6db809866b38c402f8 | [
"MIT"
] | 1 | 2018-12-19T22:08:00.000Z | 2018-12-19T22:08:00.000Z | """
@author: magician
@file: redis_action_ch05.py
@date: 2021/11/22
"""
import bisect
import contextlib
import csv
import functools
import json
import logging
import random
import threading
import time
import unittest
import uuid
import redis
from datetime import datetime
QUIT = False              # set True to make clean_counters()'s daemon loop exit
SAMPLE_COUNT = 100        # samples kept per counter at each precision
config_connection = None  # module-level Redis connection used by get_config()

# Map logging levels to the lowercase severity strings used in log key names.
# NOTE(review): 'waring' looks like a typo for 'warning', but it is baked into
# runtime key names — confirm with stored data before fixing.
SEVERITY = {
    logging.DEBUG: 'debug',
    logging.INFO: 'info',
    logging.WARNING: 'waring',
    logging.ERROR: 'error',
    logging.CRITICAL: 'critical',
}
# Also map each severity name to itself so callers may pass strings directly.
SEVERITY.update((name, name) for name in list(SEVERITY.values()))

# Counter precisions in seconds: 1s, 5s, 1min, 5min, 1h, 5h, 1day.
PRECISION = [1, 5, 60, 300, 3600, 18000, 86400]

LAST_CHECKED = None           # last time the maintenance flag was polled
IS_UNDER_MAINTENANCE = False  # cached maintenance flag (refreshed once per second)
CONFIGS = {}                  # cache: config key -> parsed config dict
CHECKED = {}                  # cache: config key -> last fetch timestamp
REDIS_CONNECTIONS = {}        # cache: config key -> redis.Redis connection
def to_bytes(x):
    """Return *x* encoded to ``bytes`` if it is a ``str``; otherwise unchanged."""
    if isinstance(x, str):
        return x.encode()
    return x
def to_str(x):
    """Return *x* decoded to ``str`` if it is ``bytes``; otherwise unchanged."""
    if isinstance(x, bytes):
        return x.decode()
    return x
def log_recent(conn, name, message, severity=logging.INFO, pipe=None):
    """
    Prepend a timestamped message to the capped "recent" log list.

    The list lives at ``recent:<name>:<severity>`` and is trimmed to the
    100 newest entries.

    @param conn: redis connection
    @param name: log name
    @param message: message text
    @param severity: logging level or severity string (mapped via SEVERITY)
    @param pipe: optional pipeline to reuse (commands are executed here)
    @return: None
    """
    level = str(SEVERITY.get(severity, severity)).lower()
    key = 'recent:%s:%s' % (name, level)
    entry = time.asctime() + ' ' + message
    pipe = pipe or conn.pipeline()
    pipe.lpush(key, entry)
    # Keep only the 100 most recent entries.
    pipe.ltrim(key, 0, 99)
    pipe.execute()
def log_common(conn, name, message, severity=logging.INFO, timeout=5):
    """
    Count a message in the per-hour "common" (most frequent) log.

    Frequencies live in a ZSET at ``common:<name>:<severity>``; when a new
    hour starts, the previous hour's data is archived under ``:last`` and
    its start marker under ``:pstart``.  The message is also mirrored into
    the recent log via log_recent().

    @param conn: redis connection
    @param name: log name
    @param message: message text
    @param severity: logging level or severity string (mapped via SEVERITY)
    @param timeout: seconds to keep retrying on WATCH conflicts
    @return: None (gives up silently once the timeout elapses)
    """
    severity = str(SEVERITY.get(severity, severity)).lower()
    destination = 'common:%s:%s' % (name, severity)
    start_key = destination + ':start'
    pipe = conn.pipeline()
    end = time.time() + timeout
    while time.time() < end:
        try:
            pipe.watch(start_key)
            now = datetime.utcnow().timetuple()
            hour_start = datetime(*now[:4]).isoformat()
            existing = pipe.get(start_key)
            pipe.multi()
            if existing and existing < to_bytes(hour_start):
                # A new hour began: archive the previous hour's data.
                pipe.rename(destination, destination + ':last')
                # BUGFIX: archive the *start key*; the original renamed
                # ``destination`` twice, which would fail (already renamed)
                # and never preserved the previous start marker.  Matches
                # the correct pattern in update_stats().
                pipe.rename(start_key, destination + ':pstart')
                pipe.set(start_key, hour_start)
            elif not existing:
                pipe.set(start_key, hour_start)
            pipe.zincrby(destination, 1, message)
            log_recent(pipe, name, message, severity, pipe)
            return
        except redis.exceptions.WatchError:
            # Someone else rotated the hour; retry until timeout.
            continue
def update_counter(conn, name, count=1, now=None):
    """
    Increment the named counter at every configured precision.

    For each precision in PRECISION, bumps the hash field for the current
    time slice and registers the counter in the 'known:' ZSET so
    clean_counters() can find it.

    @param conn: redis connection
    @param name: counter name
    @param count: amount to add (default 1)
    @param now: timestamp to record at (defaults to time.time())
    @return: None
    """
    now = now or time.time()
    pipe = conn.pipeline()
    for prec in PRECISION:
        # Start of the current time slice at this precision.
        pnow = int(now / prec) * prec
        hash = '%s:%s' % (prec, name)
        pipe.zadd('known:', {hash: 0})
        # BUGFIX: the key must be 'count:' + hash (no space) to match the
        # keys read by get_counter() and clean_counters().
        pipe.hincrby('count:' + hash, pnow, count)
    pipe.execute()
def get_counter(conn, name, precision):
    """
    Fetch all samples of a counter at one precision.

    @param conn: redis connection
    @param name: counter name
    @param precision: slice width in seconds (one of PRECISION)
    @return: list of (timestamp, count) int pairs, sorted by timestamp
    """
    samples = conn.hgetall('count:' + '%s:%s' % (precision, name))
    return sorted((int(ts), int(cnt)) for ts, cnt in samples.items())
def clean_counters(conn):
    """
    Daemon loop: trim samples older than SAMPLE_COUNT slices from all
    known counters.

    Runs until the module-level QUIT flag becomes True.  Coarser-precision
    counters are cleaned proportionally less often, and a counter (plus its
    'known:' registration) is removed entirely once it holds no samples.

    @param conn: redis connection
    @return: None
    """
    pipe = conn.pipeline(True)
    passes = 0
    while not QUIT:
        start = time.time()
        index = 0
        while index < conn.zcard('known:'):
            # BUGFIX: fetch the counter at this rank with ZRANGE; the
            # original called zcard() with range arguments, which is not a
            # valid redis-py call and never returned the member.
            hash = conn.zrange('known:', index, index)
            index += 1
            if not hash:
                break
            hash = hash[0]
            prec = int(hash.partition(b':')[0])
            # Counters with >= 1-minute precision are cleaned only every
            # (prec // 60) passes.
            bprec = int(prec // 60) or 1
            if passes % bprec:
                continue
            hkey = 'count:' + to_str(hash)
            cutoff = time.time() - SAMPLE_COUNT * prec
            samples = list(map(int, conn.hkeys(hkey)))
            samples.sort()
            remove = bisect.bisect_right(samples, cutoff)
            if remove:
                conn.hdel(hkey, *samples[:remove])
                if remove == len(samples):
                    # Hash may be empty: delete it atomically unless a
                    # concurrent writer added new samples meanwhile.
                    try:
                        pipe.watch(hkey)
                        if not pipe.hlen(hkey):
                            pipe.multi()
                            pipe.zrem('known:', hash)
                            pipe.execute()
                            # A member was removed; re-check this index.
                            index -= 1
                        else:
                            pipe.unwatch()
                    except redis.exceptions.WatchError:
                        pass
        passes += 1
        # Aim for roughly one full pass per minute.
        duration = min(int(time.time() - start) + 1, 60)
        time.sleep(max(60 - duration, 1))
def update_stats(conn, context, type, value, timeout=5):
    """
    Record one observation into this hour's stats ZSET.

    Maintains min/max/count/sum/sumsq members under
    ``stats:<context>:<type>``, rolling the previous hour's data to
    ``:last``/``:pstart`` when a new hour begins.

    @param conn: redis connection
    @param context: stat context (e.g. a request path)
    @param type: stat type (e.g. 'AccessTime')
    @param value: observed sample
    @param timeout: seconds to keep retrying on WATCH conflicts
    @return: [count, sum, sumsq] scores after the update, or None on timeout
    """
    destination = 'stats:%s:%s' % (context, type)
    start_key = destination + ':start'
    pipe = conn.pipeline(True)
    end = time.time() + timeout
    while time.time() < end:
        try:
            pipe.watch(start_key)
            now = datetime.utcnow().timetuple()
            hour_start = datetime(*now[:4]).isoformat()
            existing = pipe.get(start_key)
            pipe.multi()
            if not existing:
                pipe.set(start_key, hour_start)
            elif to_str(existing) < hour_start:
                # New hour: archive the previous hour's data and marker.
                pipe.rename(destination, destination + ':last')
                pipe.rename(start_key, destination + ':pstart')
                pipe.set(start_key, hour_start)
            tkey1 = str(uuid.uuid4())
            tkey2 = str(uuid.uuid4())
            pipe.zadd(tkey1, {'min': value})
            # BUGFIX: the 'max' sample must go into tkey2 (the key unioned
            # with aggregate='max' below); the original wrote it into tkey1,
            # so the max union saw an empty key.
            pipe.zadd(tkey2, {'max': value})
            pipe.zunionstore(destination, [destination, tkey1], aggregate='min')
            pipe.zunionstore(destination, [destination, tkey2], aggregate='max')
            pipe.delete(tkey1, tkey2)
            pipe.zincrby(destination, 1, 'count')
            pipe.zincrby(destination, value, 'sum')
            pipe.zincrby(destination, value * value, 'sumsq')
            return pipe.execute()[-3:]
        except redis.exceptions.WatchError:
            continue
def get_stats(conn, context, type):
    """
    Fetch the aggregate stats ZSET and derive average and stddev.

    @param conn: redis connection
    @param context: stat context
    @param type: stat type
    @return: dict of bytes member -> float score, with b'average' and
             b'stddev' added
    """
    key = 'stats:%s:%s' % (context, type)
    data = dict(conn.zrange(key, 0, -1, withscores=True))
    count = data[b'count']
    total = data[b'sum']
    data[b'average'] = total / count
    # Sample standard deviation (guard against a single-sample divide-by-zero).
    numerator = data[b'sumsq'] - total ** 2 / count
    data[b'stddev'] = (numerator / (count - 1 or 1)) ** 0.5
    return data
@contextlib.contextmanager
def access_time(conn, context):
    """
    Context manager that times its body and records the duration.

    Feeds the elapsed time into update_stats() under type 'AccessTime',
    then updates the 'slowest:AccessTime' ZSET (kept to the 100 slowest
    contexts) with the running average.

    @param conn: redis connection
    @param context: label for the timed section (e.g. a request path)
    """
    start = time.time()
    yield

    delta = time.time() - start
    stats = update_stats(conn, context, 'AccessTime', delta)
    # stats is [count, sum, sumsq]; average = sum / count.
    average = stats[1] / stats[0]

    pipe = conn.pipeline(True)
    pipe.zadd('slowest:AccessTime', {context: average})
    # Keep only the 100 highest-average contexts.
    pipe.zremrangebyrank('slowest:AccessTime', 0, -101)
    pipe.execute()
def process_view(conn, callback):
    """
    Run a view callback while recording its access time.

    @param conn: redis connection
    @param callback: zero-argument view function to invoke
    @return: whatever *callback* returns

    NOTE(review): reads the module-level ``request`` stand-in for its
    ``path``; in a real web framework this would come from the request.
    """
    with access_time(conn, request.path):
        return callback()
def ip_to_score(ip_address):
    """
    Convert a dotted-quad IPv4 string to its integer score.

    Treats the four octets as big-endian base-256 digits, e.g.
    '1.2.3.4' -> 16909060.

    @param ip_address: dotted-quad string
    @return: int score
    """
    return functools.reduce(
        lambda acc, octet: acc * 256 + int(octet, 10),
        ip_address.split('.'),
        0,
    )
def import_ips_to_redis(conn, filename):
    """
    Load GeoLite IP-range rows into the 'ip2cityid:' ZSET.

    Each data row's starting IP (dotted quad or integer) becomes the score
    of a '<locId>_<rownum>' member, enabling range lookups by IP score.
    Header/comment rows (any containing 'i' in the first column) and
    unparsable rows are skipped.

    @param conn: redis connection
    @param filename: path to GeoLiteCity-Blocks.csv
    @return: None
    """
    # BUGFIX: csv.reader requires a text-mode file on Python 3; the
    # original opened the file with 'rb' and crashed on the first row.
    with open(filename, newline='') as fh:
        for count, row in enumerate(csv.reader(fh)):
            start_ip = row[0] if row else ''
            if 'i' in start_ip.lower():
                continue
            if '.' in start_ip:
                start_ip = ip_to_score(start_ip)
            elif start_ip.isdigit():
                start_ip = int(start_ip, 10)
            else:
                continue
            # Row number keeps members unique when locIds repeat.
            city_id = row[2] + '_' + str(count)
            conn.zadd('ip2cityid:', {city_id: start_ip})
def import_cities_to_redis(conn, filename):
    """
    Load GeoLite city rows into the 'cityid2city:' HASH.

    Maps each numeric locId to a JSON list [city, country, region].
    Header and copyright rows are skipped.

    @param conn: redis connection
    @param filename: path to GeoLiteCity-Location.csv
    @return: None
    """
    # BUGFIX: csv.reader requires text mode on Python 3 ('rb' crashed);
    # GeoLite files are latin-1 encoded, so decode at open() instead of
    # per-cell .decode() calls.
    with open(filename, encoding='latin-1', newline='') as fh:
        for row in csv.reader(fh):
            # BUGFIX: data rows *start* with a numeric locId; the original
            # condition skipped rows whose first column WAS a digit, i.e.
            # every real data row (cf. the reference version below that
            # uses ``not row[0].isdigit()``).
            if len(row) < 4 or not row[0].isdigit():
                continue
            city_id = row[0]
            country = row[1]
            region = row[2]
            city = row[3]
            conn.hset('cityid2city:', city_id, json.dumps([city, country, region]))
def find_city_by_ip(conn, ip_address):
    """
    Look up the city record for an IP address.

    @param conn: redis connection
    @param ip_address: dotted-quad string or pre-computed integer score
    @return: the decoded JSON city list ([city, country, region]) or None
             when no range covers the address
    """
    if isinstance(ip_address, str):
        ip_address = ip_to_score(ip_address)
    city_id = conn.zrevrangebyscore('ip2cityid:', ip_address, 0, start=0, num=1)
    if not city_id:
        return None
    raw = city_id[0]
    # BUGFIX: redis-py returns bytes members unless decode_responses=True;
    # bytes.partition('_') with a str separator raises TypeError.
    if isinstance(raw, bytes):
        raw = raw.decode()
    city_id = raw.partition('_')[0]
    return json.loads(conn.hget('cityid2city:', city_id))
def is_under_maintenance(conn):
    """
    Return whether the 'is-under-maintenance' flag is set in Redis.

    Caches the answer in module globals and refreshes it at most once per
    second to avoid hammering Redis on every request.

    @param conn: redis connection
    @return: bool
    """
    global LAST_CHECKED, IS_UNDER_MAINTENANCE

    # Refresh the cached flag at most once per second.
    if (not LAST_CHECKED) or LAST_CHECKED < time.time() - 1:
        LAST_CHECKED = time.time()
        IS_UNDER_MAINTENANCE = bool(conn.get('is-under-maintenance'))

    return IS_UNDER_MAINTENANCE
def set_config(conn, type, component, config):
    """
    Store *config* as JSON under the key ``config:<type>:<component>``.

    @param conn: redis connection
    @param type: configuration category (e.g. 'redis')
    @param component: component name
    @param config: JSON-serializable configuration object
    @return: None
    """
    key = 'config:%s:%s' % (type, component)
    conn.set(key, json.dumps(config))
def get_config(conn, type, component, wait=1):
    """
    Fetch a configuration dict, cached for up to *wait* seconds.

    Re-reads the JSON stored under ``config:<type>:<component>`` at most
    once per *wait* seconds (tracked in the module-level CHECKED/CONFIGS
    caches) and returns the cached dict otherwise.

    @param conn: redis connection
    @param type: configuration category (e.g. 'redis')
    @param component: component name
    @param wait: minimum seconds between Redis reads
    @return: the configuration dict (possibly empty)
    """
    key = 'config:%s:%s' % (type, component)

    ch = CHECKED.get(key)
    # Only hit Redis if the cached copy is older than `wait` seconds.
    if (not ch) or ch < time.time() - wait:
        CHECKED[key] = time.time()

        config = json.loads(conn.get(key) or '{}')
        # Normalize keys to plain str so they can be used as kwargs.
        config = dict((str(k), config[k]) for k in config)
        old_config = CONFIGS.get(key)

        if config != old_config:
            CONFIGS[key] = config

    return CONFIGS.get(key)
def redis_connection(component, wait=1):
    """
    Decorator factory: inject a Redis connection for *component*.

    The decorated function receives, as its first argument, a redis.Redis
    built from the stored configuration ``config:redis:<component>``.
    The connection is cached in REDIS_CONNECTIONS and transparently
    rebuilt whenever the stored configuration changes.

    @param component: component name whose config selects the connection
    @param wait: minimum seconds between configuration re-checks
    @return: decorator
    """
    key = 'config:redis:' + component

    def wrapper(function):
        @functools.wraps(function)
        def call(*args, **kwargs):
            # object() sentinel guarantees a rebuild on the first call.
            old_config = CONFIGS.get(key, object())
            config = get_config(config_connection, 'redis', component, wait)

            if config != old_config:
                REDIS_CONNECTIONS[key] = redis.Redis(**config)

            return function(REDIS_CONNECTIONS.get(key), *args, **kwargs)

        return call

    return wrapper
# --------------- Below this line are helpers to test the code ----------------
class request:
    # Minimal stand-in for a web-framework request object; process_view()
    # reads the `path` attribute the tests assign to it.
    pass
# # a faster version with pipelines for actual testing
# def import_ips_to_redis(conn, filename):
# csv_file = csv.reader(open(filename, 'rb'))
# pipe = conn.pipeline(False)
# for count, row in enumerate(csv_file):
# start_ip = row[0] if row else ''
# if 'i' in start_ip.lower():
# continue
# if '.' in start_ip:
# start_ip = ip_to_score(start_ip)
# elif start_ip.isdigit():
# start_ip = int(start_ip, 10)
# else:
# continue
#
# city_id = row[2] + '_' + str(count)
# pipe.zadd('ip2cityid:', {city_id: start_ip})
# if not (count + 1) % 1000:
# pipe.execute()
# pipe.execute()
#
#
# def import_cities_to_redis(conn, filename):
# pipe = conn.pipeline(False)
# for count, row in enumerate(csv.reader(open(filename, 'rb'))):
# if len(row) < 4 or not row[0].isdigit():
# continue
# row = [i.decode('latin-1') for i in row]
# city_id = row[0]
# country = row[1]
# region = row[2]
# city = row[3]
# pipe.hset('cityid2city:', city_id,
# json.dumps([city, region, country]))
# if not (count + 1) % 1000:
# pipe.execute()
# pipe.execute()
class TestCh05(unittest.TestCase):
    """Integration tests for the chapter 5 helpers.

    Requires a live Redis server (db 15, password '123456'); setUp/tearDown
    flush that database and reset the module-level globals the tests mutate.
    """
    def setUp(self):
        global config_connection
        import redis
        # Shared connection for the test AND for get_config()'s global.
        self.conn = config_connection = redis.Redis(db=15, password='123456')
        self.conn.flushdb()
    def tearDown(self):
        self.conn.flushdb()
        del self.conn
        # Restore module globals mutated by the tests.
        global config_connection, QUIT, SAMPLE_COUNT
        config_connection = None
        QUIT = False
        SAMPLE_COUNT = 100
        print()
        print()
    def test_log_recent(self):
        # Recent log keeps the newest entries in a capped list.
        import pprint
        conn = self.conn
        print("Let's write a few logs to the recent log")
        for msg in range(5):
            log_recent(conn, 'test', 'this is message %s' % msg)
        recent = conn.lrange('recent:test:info', 0, -1)
        print("The current recent message log has this many messages:", len(recent))
        print("Those messages include:")
        pprint.pprint(recent[:10])
        self.assertTrue(len(recent) >= 5)
    def test_log_common(self):
        # Common log counts message frequencies in a ZSET.
        import pprint
        conn = self.conn
        print("Let's write some items to the common log")
        for count in range(1, 6):
            for i in range(count):
                log_common(conn, 'test', "message-%s" % count)
        common = conn.zrevrange('common:test:info', 0, -1, withscores=True)
        print("The current number of common messages is:", len(common))
        print("Those common messages are:")
        pprint.pprint(common)
        self.assertTrue(len(common) >= 5)
    def test_counters(self):
        # Exercise update/get, then run clean_counters in a daemon thread
        # with time.time() shifted 2 days ahead so everything expires.
        import pprint
        global QUIT, SAMPLE_COUNT
        conn = self.conn
        print("Let's update some counters for now and a little in the future")
        now = time.time()
        for delta in range(10):
            update_counter(conn, 'test', count=random.randrange(1, 5), now=now + delta)
        counter = get_counter(conn, 'test', 1)
        print("We have some per-second counters:", len(counter))
        self.assertTrue(len(counter) >= 10)
        counter = get_counter(conn, 'test', 5)
        print("We have some per-5-second counters:", len(counter))
        print("These counters include:")
        pprint.pprint(counter[:10])
        self.assertTrue(len(counter) >= 2)
        print()
        tt = time.time
        def new_tt():
            return tt() + 2 * 86400
        time.time = new_tt
        print("Let's clean out some counters by setting our sample count to 0")
        SAMPLE_COUNT = 0
        t = threading.Thread(target=clean_counters, args=(conn,))
        t.setDaemon(1)  # to make sure it dies if we ctrl+C quit
        t.start()
        time.sleep(1)
        QUIT = True
        time.time = tt
        counter = get_counter(conn, 'test', 86400)
        print("Did we clean out all of the counters?", not counter)
        self.assertFalse(counter)
    def test_stats(self):
        import pprint
        conn = self.conn
        print("Let's add some data for our statistics!")
        for i in range(5):
            r = update_stats(conn, 'temp', 'example', random.randrange(5, 15))
        print("We have some aggregate statistics:", r)
        rr = get_stats(conn, 'temp', 'example')
        print("Which we can also fetch manually:")
        pprint.pprint(rr)
        self.assertTrue(rr[b'count'] >= 5)
    def test_access_time(self):
        # Both the context-manager and callback (process_view) code paths.
        import pprint
        conn = self.conn
        print("Let's calculate some access times...")
        for i in range(10):
            with access_time(conn, "req-%s" % i):
                time.sleep(.5 + random.random())
        print("The slowest access times are:")
        atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
        pprint.pprint(atimes[:10])
        self.assertTrue(len(atimes) >= 10)
        print()
        def cb():
            time.sleep(1 + random.random())
        print("Let's use the callback version...")
        for i in range(5):
            request.path = 'cbreq-%s' % i
            process_view(conn, cb)
        print("The slowest access times are:")
        atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
        pprint.pprint(atimes[:10])
        self.assertTrue(len(atimes) >= 10)
    def test_ip_lookup(self):
        # Needs the (not bundled) GeoLiteCity CSV files; skips gracefully.
        conn = self.conn
        try:
            open('GeoLiteCity-Blocks.csv', 'rb')
            open('GeoLiteCity-Location.csv', 'rb')
        except:
            print("********")
            print("You do not have the GeoLiteCity database available, aborting test")
            print("Please have the following two files in the current path:")
            print("GeoLiteCity-Blocks.csv")
            print("GeoLiteCity-Location.csv")
            print("********")
            return
        print("Importing IP addresses to Redis... (this may take a while)")
        import_ips_to_redis(conn, 'GeoLiteCity-Blocks.csv')
        ranges = conn.zcard('ip2cityid:')
        print("Loaded ranges into Redis:", ranges)
        self.assertTrue(ranges > 1000)
        print()
        print("Importing Location lookups to Redis... (this may take a while)")
        import_cities_to_redis(conn, 'GeoLiteCity-Location.csv')
        cities = conn.hlen('cityid2city:')
        print("Loaded city lookups into Redis:", cities)
        self.assertTrue(cities > 1000)
        print()
        print("Let's lookup some locations!")
        rr = random.randrange
        for i in range(5):
            print(find_city_by_ip(conn, '%s.%s.%s.%s' % (rr(1, 255), rr(256), rr(256), rr(256))))
    def test_is_under_maintenance(self):
        # The flag is cached for one second, so changes show up after a sleep.
        print("Are we under maintenance (we shouldn't be)?", is_under_maintenance(self.conn))
        self.conn.set('is-under-maintenance', 'yes')
        print("We cached this, so it should be the same:", is_under_maintenance(self.conn))
        time.sleep(1)
        print("But after a sleep, it should change:", is_under_maintenance(self.conn))
        print("Cleaning up...")
        self.conn.delete('is-under-maintenance')
        time.sleep(1)
        print("Should be False again:", is_under_maintenance(self.conn))
    def test_config(self):
        # Round-trip set_config -> redis_connection decorator.
        print("Let's set a config and then get a connection from that config...")
        set_config(self.conn, 'redis', 'test', {'db': 15})
        @redis_connection('test')
        def test(conn2):
            return bool(conn2.info())
        print("We can run commands from the configured connection:", test())
if __name__ == '__main__':
    # Run the Redis-backed integration tests when executed as a script.
    unittest.main()
| 27.743025 | 97 | 0.568094 | import bisect
import contextlib
import csv
import functools
import json
import logging
import random
import threading
import time
import unittest
import uuid
import redis
from datetime import datetime
QUIT = False
SAMPLE_COUNT = 100
config_connection = None
SEVERITY = {
logging.DEBUG: 'debug',
logging.INFO: 'info',
logging.WARNING: 'waring',
logging.ERROR: 'error',
logging.CRITICAL: 'critical',
}
SEVERITY.update((name, name) for name in list(SEVERITY.values()))
PRECISION = [1, 5, 60, 300, 3600, 18000, 86400]
LAST_CHECKED = None
IS_UNDER_MAINTENANCE = False
CONFIGS = {}
CHECKED = {}
REDIS_CONNECTIONS = {}
def to_bytes(x):
return x.encode() if isinstance(x, str) else x
def to_str(x):
return x.decode() if isinstance(x, bytes) else x
def log_recent(conn, name, message, severity=logging.INFO, pipe=None):
severity = str(SEVERITY.get(severity, severity)).lower()
destination = 'recent:%s:%s' % (name, severity)
message = time.asctime() + ' ' + message
pipe = pipe or conn.pipeline()
pipe.lpush(destination, message)
pipe.ltrim(destination, 0, 99)
pipe.execute()
def log_common(conn, name, message, severity=logging.INFO, timeout=5):
severity = str(SEVERITY.get(severity, severity)).lower()
destination = 'common:%s:%s' % (name, severity)
start_key = destination + ':start'
pipe = conn.pipeline()
end = time.time() + timeout
while time.time() < end:
try:
pipe.watch(start_key)
now = datetime.utcnow().timetuple()
hour_start = datetime(*now[:4]).isoformat()
existing = pipe.get(start_key)
pipe.multi()
if existing and existing < to_bytes(hour_start):
pipe.rename(destination, destination + ':last')
pipe.rename(destination, destination + ':pstart')
pipe.set(start_key, hour_start)
elif not existing:
pipe.set(start_key, hour_start)
pipe.zincrby(destination, 1, message)
log_recent(pipe, name, message, severity, pipe)
return
except redis.exceptions.WatchError:
continue
def update_counter(conn, name, count=1, now=None):
now = now or time.time()
pipe = conn.pipeline()
for prec in PRECISION:
pnow = int(now / prec) * prec
hash = '%s:%s' % (prec, name)
pipe.zadd('known:', {hash: 0})
pipe.hincrby('count: ' + hash, pnow, count)
pipe.execute()
def get_counter(conn, name, precision):
hash = "%s:%s" % (precision, name)
data = conn.hgetall('count:' + hash)
to_return = []
for key, value in data.items():
to_return.append((int(key), int(value)))
to_return.sort()
return to_return
def clean_counters(conn):
pipe = conn.pipeline(True)
passes = 0
while not QUIT:
start = time.time()
index = 0
while index < conn.zcard('known:'):
hash = conn.zcard('known:', index, index)
index += 1
if not hash:
break
hash = hash[0]
prec = int(hash.partition(b':')[0])
bprec = int(prec // 60) or 1
if passes % bprec:
continue
hkey = 'count:' + to_str(hash)
cutoff = time.time() - SAMPLE_COUNT * prec
samples = list(map(int, conn.hkeys(hkey)))
samples.sort()
remove = bisect.bisect_right(samples, cutoff)
if remove:
conn.hdel(hkey, *samples[:remove])
if remove == len(samples):
try:
pipe.watch(hkey)
if not pipe.hlen(hkey):
pipe.multi()
pipe.zrem('known:', hash)
pipe.execute()
index -= 1
else:
pipe.unwatch()
except redis.exceptions.WatchError:
pass
passes += 1
duration = min(int(time.time() - start) + 1, 60)
time.sleep(max(60 - duration, 1))
def update_stats(conn, context, type, value, timeout=5):
destination = 'stats:%s:%s' % (context, type)
start_key = destination + ':start'
pipe = conn.pipeline(True)
end = time.time() + timeout
while time.time() < end:
try:
pipe.watch(start_key)
now = datetime.utcnow().timetuple()
hour_start = datetime(*now[:4]).isoformat()
existing = pipe.get(start_key)
pipe.multi()
if not existing:
pipe.set(start_key, hour_start)
elif to_str(existing) < hour_start:
pipe.rename(destination, destination + ':last')
pipe.rename(start_key, destination + ':pstart')
pipe.set(start_key, hour_start)
tkey1 = str(uuid.uuid4())
tkey2 = str(uuid.uuid4())
pipe.zadd(tkey1, {'min': value})
pipe.zadd(tkey1, {'max': value})
pipe.zunionstore(destination, [destination, tkey1], aggregate='min')
pipe.zunionstore(destination, [destination, tkey2], aggregate='max')
pipe.delete(tkey1, tkey2)
pipe.zincrby(destination, 1, 'count')
pipe.zincrby(destination, value, 'sum')
pipe.zincrby(destination, value * value, 'sumsq')
return pipe.execute()[-3:]
except redis.exceptions.WatchError:
continue
def get_stats(conn, context, type):
key = 'stats:%s:%s' % (context, type)
data = dict(conn.zrange(key, 0, -1, withscores=True))
data[b'average'] = data[b'sum'] / data[b'count']
numerator = data[b'sumsq'] - data[b'sum'] ** 2 / data[b'count']
data[b'stddev'] = (numerator / (data[b'count'] - 1 or 1)) ** 0.5
return data
@contextlib.contextmanager
def access_time(conn, context):
start = time.time()
yield
delta = time.time() - start
stats = update_stats(conn, context, 'AccessTime', delta)
average = stats[1] / stats[0]
pipe = conn.pipeline(True)
pipe.zadd('slowest:AccessTime', {context: average})
pipe.zremrangebyrank('slowest:AccessTime', 0, -101)
pipe.execute()
def process_view(conn, callback):
with access_time(conn, request.path):
return callback()
def ip_to_score(ip_address):
score = 0
for v in ip_address.split('.'):
score = score * 256 + int(v, 10)
return score
def import_ips_to_redis(conn, filename):
csv_file = csv.reader(open(filename, 'rb'))
for count, row in enumerate(csv_file):
start_ip = row[0] if row else ''
if 'i' in start_ip.lower():
continue
if '.' in start_ip:
start_ip = ip_to_score(start_ip)
elif start_ip.isdigit():
start_ip = int(start_ip, 10)
else:
continue
city_id = row[2] + '_' + str(count)
conn.zadd('ip2cityid:', {city_id: start_ip})
def import_cities_to_redis(conn, filename):
for row in csv.reader(open(filename, 'rb')):
if len(row) < 4 or row[0].isdigit():
continue
row = [i.decode('latin-1') for i in row]
city_id = row[0]
country = row[1]
region = row[2]
city = row[3]
conn.hset('cityid2city:', city_id, json.dumps([city, country, region]))
def find_city_by_ip(conn, ip_address):
if isinstance(ip_address, str):
ip_address = ip_to_score(ip_address)
city_id = conn.zrevrangebyscore('ip2cityid:', ip_address, 0, start=0, num=1)
if not city_id:
return None
city_id = city_id[0].partition('_')[0]
return json.loads(conn.hget('cityid2city:', city_id))
def is_under_maintenance(conn):
global LAST_CHECKED, IS_UNDER_MAINTENANCE
if (not LAST_CHECKED) or LAST_CHECKED < time.time() - 1:
LAST_CHECKED = time.time()
IS_UNDER_MAINTENANCE = bool(conn.get('is-under-maintenance'))
return IS_UNDER_MAINTENANCE
def set_config(conn, type, component, config):
conn.set('config:%s:%s' % (type, component), json.dumps(config))
def get_config(conn, type, component, wait=1):
key = 'config:%s:%s' % (type, component)
ch = CHECKED.get(key)
if (not ch) or ch < time.time() - wait:
CHECKED[key] = time.time()
config = json.loads(conn.get(key) or '{}')
config = dict((str(k), config[k]) for k in config)
old_config = CONFIGS.get(key)
if config != old_config:
CONFIGS[key] = config
return CONFIGS.get(key)
def redis_connection(component, wait=1):
key = 'config:redis:' + component
def wrapper(function):
@functools.wraps(function)
def call(*args, **kwargs):
old_config = CONFIGS.get(key, object())
config = get_config(config_connection, 'redis', component, wait)
if config != old_config:
REDIS_CONNECTIONS[key] = redis.Redis(**config)
return function(REDIS_CONNECTIONS.get(key), *args, **kwargs)
return call
return wrapper
class request:
pass
5(unittest.TestCase):
def setUp(self):
global config_connection
import redis
self.conn = config_connection = redis.Redis(db=15, password='123456')
self.conn.flushdb()
def tearDown(self):
self.conn.flushdb()
del self.conn
global config_connection, QUIT, SAMPLE_COUNT
config_connection = None
QUIT = False
SAMPLE_COUNT = 100
print()
print()
def test_log_recent(self):
import pprint
conn = self.conn
print("Let's write a few logs to the recent log")
for msg in range(5):
log_recent(conn, 'test', 'this is message %s' % msg)
recent = conn.lrange('recent:test:info', 0, -1)
print("The current recent message log has this many messages:", len(recent))
print("Those messages include:")
pprint.pprint(recent[:10])
self.assertTrue(len(recent) >= 5)
def test_log_common(self):
import pprint
conn = self.conn
print("Let's write some items to the common log")
for count in range(1, 6):
for i in range(count):
log_common(conn, 'test', "message-%s" % count)
common = conn.zrevrange('common:test:info', 0, -1, withscores=True)
print("The current number of common messages is:", len(common))
print("Those common messages are:")
pprint.pprint(common)
self.assertTrue(len(common) >= 5)
def test_counters(self):
import pprint
global QUIT, SAMPLE_COUNT
conn = self.conn
print("Let's update some counters for now and a little in the future")
now = time.time()
for delta in range(10):
update_counter(conn, 'test', count=random.randrange(1, 5), now=now + delta)
counter = get_counter(conn, 'test', 1)
print("We have some per-second counters:", len(counter))
self.assertTrue(len(counter) >= 10)
counter = get_counter(conn, 'test', 5)
print("We have some per-5-second counters:", len(counter))
print("These counters include:")
pprint.pprint(counter[:10])
self.assertTrue(len(counter) >= 2)
print()
tt = time.time
def new_tt():
return tt() + 2 * 86400
time.time = new_tt
print("Let's clean out some counters by setting our sample count to 0")
SAMPLE_COUNT = 0
t = threading.Thread(target=clean_counters, args=(conn,))
t.setDaemon(1)
t.start()
time.sleep(1)
QUIT = True
time.time = tt
counter = get_counter(conn, 'test', 86400)
print("Did we clean out all of the counters?", not counter)
self.assertFalse(counter)
def test_stats(self):
import pprint
conn = self.conn
print("Let's add some data for our statistics!")
for i in range(5):
r = update_stats(conn, 'temp', 'example', random.randrange(5, 15))
print("We have some aggregate statistics:", r)
rr = get_stats(conn, 'temp', 'example')
print("Which we can also fetch manually:")
pprint.pprint(rr)
self.assertTrue(rr[b'count'] >= 5)
def test_access_time(self):
import pprint
conn = self.conn
print("Let's calculate some access times...")
for i in range(10):
with access_time(conn, "req-%s" % i):
time.sleep(.5 + random.random())
print("The slowest access times are:")
atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
pprint.pprint(atimes[:10])
self.assertTrue(len(atimes) >= 10)
print()
def cb():
time.sleep(1 + random.random())
print("Let's use the callback version...")
for i in range(5):
request.path = 'cbreq-%s' % i
process_view(conn, cb)
print("The slowest access times are:")
atimes = conn.zrevrange('slowest:AccessTime', 0, -1, withscores=True)
pprint.pprint(atimes[:10])
self.assertTrue(len(atimes) >= 10)
def test_ip_lookup(self):
conn = self.conn
try:
open('GeoLiteCity-Blocks.csv', 'rb')
open('GeoLiteCity-Location.csv', 'rb')
except:
print("********")
print("You do not have the GeoLiteCity database available, aborting test")
print("Please have the following two files in the current path:")
print("GeoLiteCity-Blocks.csv")
print("GeoLiteCity-Location.csv")
print("********")
return
print("Importing IP addresses to Redis... (this may take a while)")
import_ips_to_redis(conn, 'GeoLiteCity-Blocks.csv')
ranges = conn.zcard('ip2cityid:')
print("Loaded ranges into Redis:", ranges)
self.assertTrue(ranges > 1000)
print()
print("Importing Location lookups to Redis... (this may take a while)")
import_cities_to_redis(conn, 'GeoLiteCity-Location.csv')
cities = conn.hlen('cityid2city:')
print("Loaded city lookups into Redis:", cities)
self.assertTrue(cities > 1000)
print()
print("Let's lookup some locations!")
rr = random.randrange
for i in range(5):
print(find_city_by_ip(conn, '%s.%s.%s.%s' % (rr(1, 255), rr(256), rr(256), rr(256))))
def test_is_under_maintenance(self):
print("Are we under maintenance (we shouldn't be)?", is_under_maintenance(self.conn))
self.conn.set('is-under-maintenance', 'yes')
print("We cached this, so it should be the same:", is_under_maintenance(self.conn))
time.sleep(1)
print("But after a sleep, it should change:", is_under_maintenance(self.conn))
print("Cleaning up...")
self.conn.delete('is-under-maintenance')
time.sleep(1)
print("Should be False again:", is_under_maintenance(self.conn))
def test_config(self):
print("Let's set a config and then get a connection from that config...")
set_config(self.conn, 'redis', 'test', {'db': 15})
@redis_connection('test')
def test(conn2):
return bool(conn2.info())
print("We can run commands from the configured connection:", test())
if __name__ == '__main__':
unittest.main()
| true | true |
f7311bee17a55636231d33e9456da8fd2182c8c0 | 4,311 | py | Python | benchmark/startQiskit1590.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit1590.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit1590.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=50
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf for boolean function f on n qubits.

    For every n-bit input x with f(x) == "1", flips the phase of |x> by
    sandwiching a multi-controlled-Z between X gates on the qubits where
    x has a '0' bit (so the control pattern matches x exactly).
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")

    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Map |x> onto the all-ones control pattern.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            # mcu1 with lambda=pi acts as a multi-controlled Z.
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            # Undo the X conjugation.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[0]) # number=44
prog.cz(input_qubit[3],input_qubit[0]) # number=45
prog.h(input_qubit[0]) # number=46
prog.cx(input_qubit[3],input_qubit[0]) # number=47
prog.z(input_qubit[3]) # number=48
prog.cx(input_qubit[3],input_qubit[0]) # number=49
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.rx(0.11938052083641225,input_qubit[1]) # number=36
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(1.4765485471872026,input_qubit[2]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=41
prog.x(input_qubit[0]) # number=42
prog.cx(input_qubit[1],input_qubit[0]) # number=43
prog.x(input_qubit[4]) # number=30
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.rx(0.45238934211692994,input_qubit[3]) # number=38
prog.y(input_qubit[1]) # number=39
prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
prog.h(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=22
prog.x(input_qubit[3]) # number=23
prog.cx(input_qubit[0],input_qubit[3]) # number=24
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[2]) # number=26
prog.x(input_qubit[2]) # number=27
prog.h(input_qubit[4]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1590.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.413534 | 82 | 0.617954 |
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[4])
prog.h(input_qubit[0])
prog.cz(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.z(input_qubit[3])
prog.cx(input_qubit[3],input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.rx(0.11938052083641225,input_qubit[1])
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0])
prog.rx(1.4765485471872026,input_qubit[2])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0])
prog.x(input_qubit[4])
prog.x(input_qubit[1])
prog.x(input_qubit[2])
prog.rx(0.45238934211692994,input_qubit[3])
prog.y(input_qubit[1])
prog.rx(-2.5258404934861938,input_qubit[1])
prog.h(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0])
prog.rx(-0.0722566310325653,input_qubit[4])
prog.x(input_qubit[1])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.h(input_qubit[4])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[3])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1590.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f7311d85c66f7c08923fda4bf9edb27d6e1b385e | 305 | py | Python | data/multilingual/Latn.VMW/Serif_12/pdf_to_json_test_Latn.VMW_Serif_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.VMW/Serif_12/pdf_to_json_test_Latn.VMW_Serif_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.VMW/Serif_12/pdf_to_json_test_Latn.VMW_Serif_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.VMW/Serif_12/udhr_Latn.VMW_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.5 | 75 | 0.813115 | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.VMW/Serif_12/udhr_Latn.VMW_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| true | true |
f7311e1a59e2934d6f9c806d49f878f1efa58c9f | 475 | py | Python | bootcamp/articles/migrations/0008_auto_20180321_1336.py | ngaurav/j | 99dc01f153155b287f419b7af357e1f7d694466d | [
"MIT"
] | null | null | null | bootcamp/articles/migrations/0008_auto_20180321_1336.py | ngaurav/j | 99dc01f153155b287f419b7af357e1f7d694466d | [
"MIT"
] | null | null | null | bootcamp/articles/migrations/0008_auto_20180321_1336.py | ngaurav/j | 99dc01f153155b287f419b7af357e1f7d694466d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-21 13:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0007_auto_20180113_2139'),
]
operations = [
migrations.AlterField(
model_name='article',
name='slug',
field=models.SlugField(blank=True, max_length=80, null=True),
),
]
| 22.619048 | 73 | 0.623158 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0007_auto_20180113_2139'),
]
operations = [
migrations.AlterField(
model_name='article',
name='slug',
field=models.SlugField(blank=True, max_length=80, null=True),
),
]
| true | true |
f7311e2ccb16402f340b2a257b71cbb087285749 | 7,221 | py | Python | XLMMacroDeobfuscator/xls_wrapper.py | wmetcalf/XLMMacroDeobfuscator | 1a854d8effb4cf5d2e02f10bbb16d58c64c1ebe8 | [
"Apache-2.0"
] | null | null | null | XLMMacroDeobfuscator/xls_wrapper.py | wmetcalf/XLMMacroDeobfuscator | 1a854d8effb4cf5d2e02f10bbb16d58c64c1ebe8 | [
"Apache-2.0"
] | null | null | null | XLMMacroDeobfuscator/xls_wrapper.py | wmetcalf/XLMMacroDeobfuscator | 1a854d8effb4cf5d2e02f10bbb16d58c64c1ebe8 | [
"Apache-2.0"
] | null | null | null | from XLMMacroDeobfuscator.excel_wrapper import ExcelWrapper
from XLMMacroDeobfuscator.boundsheet import Boundsheet
from XLMMacroDeobfuscator.boundsheet import Cell
from win32com.client import Dispatch
import pywintypes
from enum import Enum
import os
import re
class XlCellType(Enum):
xlCellTypeFormulas = -4123
xlCellTypeConstants = 2
class XLSWrapper(ExcelWrapper):
XLEXCEL4MACROSHEET = 3
def __init__(self, xls_doc_path):
self._excel = Dispatch("Excel.Application")
self.xls_workbook = self._excel.Workbooks.Open(xls_doc_path)
self.xls_workbook_name = os.path.basename(xls_doc_path)
self._macrosheets = None
self._defined_names = None
self.xl_international_flags = {}
self._international_flags = None
def get_xl_international_char(self, flag_name):
if flag_name not in self.xl_international_flags:
if self._international_flags is None:
self._international_flags = self._excel.Application.International
# flag value starts at 1, list index starts at 0
self.xl_international_flags[flag_name] = self._international_flags[flag_name.value - 1]
result = self.xl_international_flags[flag_name]
return result
def get_defined_names(self):
result = {}
name_objects = self.xls_workbook.Excel4MacroSheets.Application.Names
for name_obj in name_objects:
result[name_obj.NameLocal.lower()] = str(name_obj.RefersToLocal).strip('=')
return result
def get_defined_name(self, name, full_match=True):
result = []
name = name.lower()
if self._defined_names is None:
self._defined_names = self.get_defined_names()
if full_match:
if name in self._defined_names:
result = self._defined_names[name]
else:
for defined_name, cell_address in self._defined_names.items():
if defined_name.startswith(name):
result.append((defined_name, cell_address))
return result
def load_cells(self, macrosheet, xls_sheet):
cells = {}
try:
self._excel.Application.ScreenUpdating = False
col_offset = xls_sheet.UsedRange.Column
row_offset = xls_sheet.UsedRange.Row
formulas = xls_sheet.UsedRange.Formula
if formulas is not None:
for row_no, row in enumerate(formulas):
for col_no, col in enumerate(row):
if col:
cell = Cell()
cell.sheet = macrosheet
if len(col)>1 and col.startswith('='):
cell.formula = col
else:
cell.value = col
row_addr = row_offset + row_no
col_addr = col_offset + col_no
cell.row = row_addr
cell.column = Cell.convert_to_column_name(col_addr)
cells[(col_addr, row_addr)] = cell
self._excel.Application.ScreenUpdating = True
except pywintypes.com_error as error:
print('CELL(Formula): ' + str(error.args[2]))
try:
values= xls_sheet.UsedRange.Value
if values is not None:
for row_no, row in enumerate(values):
for col_no, col in enumerate(row):
if col:
row_addr = row_offset + row_no
col_addr = col_offset + col_no
if (col_addr, row_addr) in cells:
cell = cells[(col_addr, row_addr)]
cell.value = col
else:
cell = Cell()
cell.sheet = macrosheet
cell.value = col
cell.row = row_addr
cell.column = Cell.convert_to_column_name(col_addr)
cells[(col_addr, row_addr)] = cell
except pywintypes.com_error as error:
print('CELL(Constant): ' + str(error.args[2]))
for cell in cells:
macrosheet.add_cell(cells[cell])
def get_macrosheets(self):
if self._macrosheets is None:
self._macrosheets = {}
for sheet in self.xls_workbook.Excel4MacroSheets:
macrosheet = Boundsheet(sheet.name, 'Macrosheet')
self.load_cells(macrosheet, sheet)
self._macrosheets[sheet.name] = macrosheet
return self._macrosheets
def get_workbook_name(self):
return self.xls_workbook_name
def get_cell_info(self, sheet_name, col, row, type_ID):
sheet = self._excel.Excel4MacroSheets(sheet_name)
cell = col + row
data = None
if int(type_ID) == 2:
data = sheet.Range(col + row).Row
print(data)
elif int(type_ID) == 3:
data = sheet.Range(cell).Column
print(data)
elif int(type_ID) == 8:
data = sheet.Range(cell).HorizontalAlignment
elif int(type_ID) == 17:
data = sheet.Range(cell).Height
elif int(type_ID) == 19:
data = sheet.Range(cell).Font.Size
elif int(type_ID) == 20:
data = sheet.Range(cell).Font.Bold
elif int(type_ID) == 21:
data = sheet.Range(cell).Font.Italic
elif int(type_ID) == 23:
data = sheet.Range(cell).Font.Strikethrough
elif int(type_ID) == 24:
data = sheet.Range(cell).Font.ColorIndex
elif int(type_ID) == 50:
data = sheet.Range(cell).VerticalAlignment
else:
print("Unknown info_type (%d) at cell %s" % (type_ID, cell))
return data, False, False
if __name__ == '__main__':
path = r"tmp\xls\edd554502033d78ac18e4bd917d023da2fd64843c823c1be8bc273f48a5f3f5f.xls"
path = os.path.abspath(path)
excel_doc = XLSWrapper(path)
try:
macrosheets = excel_doc.get_macrosheets()
auto_open_labels = excel_doc.get_defined_name('auto_open', full_match=False)
for label in auto_open_labels:
print('auto_open: {}->{}'.format(label[0], label[1]))
for macrosheet_name in macrosheets:
print('SHEET: {}\t{}'.format(macrosheets[macrosheet_name].name,
macrosheets[macrosheet_name].type))
for formula_loc, info in macrosheets[macrosheet_name].cells.items():
if info.formula is not None:
print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value))
for formula_loc, info in macrosheets[macrosheet_name].cells.items():
if info.formula is None:
print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value))
finally:
excel_doc._excel.Application.DisplayAlerts = False
excel_doc._excel.Application.Quit()
| 36.654822 | 99 | 0.568481 | from XLMMacroDeobfuscator.excel_wrapper import ExcelWrapper
from XLMMacroDeobfuscator.boundsheet import Boundsheet
from XLMMacroDeobfuscator.boundsheet import Cell
from win32com.client import Dispatch
import pywintypes
from enum import Enum
import os
import re
class XlCellType(Enum):
xlCellTypeFormulas = -4123
xlCellTypeConstants = 2
class XLSWrapper(ExcelWrapper):
XLEXCEL4MACROSHEET = 3
def __init__(self, xls_doc_path):
self._excel = Dispatch("Excel.Application")
self.xls_workbook = self._excel.Workbooks.Open(xls_doc_path)
self.xls_workbook_name = os.path.basename(xls_doc_path)
self._macrosheets = None
self._defined_names = None
self.xl_international_flags = {}
self._international_flags = None
def get_xl_international_char(self, flag_name):
if flag_name not in self.xl_international_flags:
if self._international_flags is None:
self._international_flags = self._excel.Application.International
self.xl_international_flags[flag_name] = self._international_flags[flag_name.value - 1]
result = self.xl_international_flags[flag_name]
return result
def get_defined_names(self):
result = {}
name_objects = self.xls_workbook.Excel4MacroSheets.Application.Names
for name_obj in name_objects:
result[name_obj.NameLocal.lower()] = str(name_obj.RefersToLocal).strip('=')
return result
def get_defined_name(self, name, full_match=True):
result = []
name = name.lower()
if self._defined_names is None:
self._defined_names = self.get_defined_names()
if full_match:
if name in self._defined_names:
result = self._defined_names[name]
else:
for defined_name, cell_address in self._defined_names.items():
if defined_name.startswith(name):
result.append((defined_name, cell_address))
return result
def load_cells(self, macrosheet, xls_sheet):
cells = {}
try:
self._excel.Application.ScreenUpdating = False
col_offset = xls_sheet.UsedRange.Column
row_offset = xls_sheet.UsedRange.Row
formulas = xls_sheet.UsedRange.Formula
if formulas is not None:
for row_no, row in enumerate(formulas):
for col_no, col in enumerate(row):
if col:
cell = Cell()
cell.sheet = macrosheet
if len(col)>1 and col.startswith('='):
cell.formula = col
else:
cell.value = col
row_addr = row_offset + row_no
col_addr = col_offset + col_no
cell.row = row_addr
cell.column = Cell.convert_to_column_name(col_addr)
cells[(col_addr, row_addr)] = cell
self._excel.Application.ScreenUpdating = True
except pywintypes.com_error as error:
print('CELL(Formula): ' + str(error.args[2]))
try:
values= xls_sheet.UsedRange.Value
if values is not None:
for row_no, row in enumerate(values):
for col_no, col in enumerate(row):
if col:
row_addr = row_offset + row_no
col_addr = col_offset + col_no
if (col_addr, row_addr) in cells:
cell = cells[(col_addr, row_addr)]
cell.value = col
else:
cell = Cell()
cell.sheet = macrosheet
cell.value = col
cell.row = row_addr
cell.column = Cell.convert_to_column_name(col_addr)
cells[(col_addr, row_addr)] = cell
except pywintypes.com_error as error:
print('CELL(Constant): ' + str(error.args[2]))
for cell in cells:
macrosheet.add_cell(cells[cell])
def get_macrosheets(self):
if self._macrosheets is None:
self._macrosheets = {}
for sheet in self.xls_workbook.Excel4MacroSheets:
macrosheet = Boundsheet(sheet.name, 'Macrosheet')
self.load_cells(macrosheet, sheet)
self._macrosheets[sheet.name] = macrosheet
return self._macrosheets
def get_workbook_name(self):
return self.xls_workbook_name
def get_cell_info(self, sheet_name, col, row, type_ID):
sheet = self._excel.Excel4MacroSheets(sheet_name)
cell = col + row
data = None
if int(type_ID) == 2:
data = sheet.Range(col + row).Row
print(data)
elif int(type_ID) == 3:
data = sheet.Range(cell).Column
print(data)
elif int(type_ID) == 8:
data = sheet.Range(cell).HorizontalAlignment
elif int(type_ID) == 17:
data = sheet.Range(cell).Height
elif int(type_ID) == 19:
data = sheet.Range(cell).Font.Size
elif int(type_ID) == 20:
data = sheet.Range(cell).Font.Bold
elif int(type_ID) == 21:
data = sheet.Range(cell).Font.Italic
elif int(type_ID) == 23:
data = sheet.Range(cell).Font.Strikethrough
elif int(type_ID) == 24:
data = sheet.Range(cell).Font.ColorIndex
elif int(type_ID) == 50:
data = sheet.Range(cell).VerticalAlignment
else:
print("Unknown info_type (%d) at cell %s" % (type_ID, cell))
return data, False, False
if __name__ == '__main__':
path = r"tmp\xls\edd554502033d78ac18e4bd917d023da2fd64843c823c1be8bc273f48a5f3f5f.xls"
path = os.path.abspath(path)
excel_doc = XLSWrapper(path)
try:
macrosheets = excel_doc.get_macrosheets()
auto_open_labels = excel_doc.get_defined_name('auto_open', full_match=False)
for label in auto_open_labels:
print('auto_open: {}->{}'.format(label[0], label[1]))
for macrosheet_name in macrosheets:
print('SHEET: {}\t{}'.format(macrosheets[macrosheet_name].name,
macrosheets[macrosheet_name].type))
for formula_loc, info in macrosheets[macrosheet_name].cells.items():
if info.formula is not None:
print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value))
for formula_loc, info in macrosheets[macrosheet_name].cells.items():
if info.formula is None:
print('{}\t{}\t{}'.format(formula_loc, info.formula, info.value))
finally:
excel_doc._excel.Application.DisplayAlerts = False
excel_doc._excel.Application.Quit()
| true | true |
f7311e5292fc49af1408b0183fe1700dc5f1512b | 4,995 | py | Python | .tox/scenario/lib/python2.7/site-packages/futurist/tests/test_executors.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/futurist/tests/test_executors.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | .tox/scenario/lib/python2.7/site-packages/futurist/tests/test_executors.py | bdrich/neutron-lbaas | b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import testscenarios
from testtools import testcase
import futurist
from futurist.tests import base
# Module level functions need to be used since the process pool
# executor can not access instance or lambda level functions (since those
# are not pickleable).
def returns_one():
return 1
def blows_up():
raise RuntimeError("no worky")
def delayed(wait_secs):
time.sleep(wait_secs)
class TestExecutors(testscenarios.TestWithScenarios, base.TestCase):
scenarios = [
('sync', {'executor_cls': futurist.SynchronousExecutor,
'restartable': True, 'executor_kwargs': {}}),
('green_sync', {'executor_cls': futurist.SynchronousExecutor,
'restartable': True,
'executor_kwargs': {'green': True}}),
('green', {'executor_cls': futurist.GreenThreadPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
('thread', {'executor_cls': futurist.ThreadPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
('process', {'executor_cls': futurist.ProcessPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
]
def setUp(self):
super(TestExecutors, self).setUp()
self.executor = self.executor_cls(**self.executor_kwargs)
def tearDown(self):
super(TestExecutors, self).tearDown()
self.executor.shutdown()
self.executor = None
def test_run_one(self):
fut = self.executor.submit(returns_one)
self.assertEqual(1, fut.result())
self.assertTrue(fut.done())
def test_blows_up(self):
fut = self.executor.submit(blows_up)
self.assertRaises(RuntimeError, fut.result)
self.assertIsInstance(fut.exception(), RuntimeError)
def test_gather_stats(self):
self.executor.submit(blows_up)
self.executor.submit(delayed, 0.2)
self.executor.submit(returns_one)
self.executor.shutdown()
self.assertEqual(3, self.executor.statistics.executed)
self.assertEqual(1, self.executor.statistics.failures)
self.assertGreaterEqual(self.executor.statistics.runtime,
# It appears that the the thread run loop
# may call this before 0.2 seconds (or 0.2
# will not be represented as a float correctly)
# is really up so accommodate for that
# happening...
0.199)
def test_post_shutdown_raises(self):
executor = self.executor_cls(**self.executor_kwargs)
executor.shutdown()
self.assertRaises(RuntimeError, executor.submit, returns_one)
def test_restartable(self):
if not self.restartable:
raise testcase.TestSkipped("not restartable")
else:
executor = self.executor_cls(**self.executor_kwargs)
fut = executor.submit(returns_one)
self.assertEqual(1, fut.result())
executor.shutdown()
self.assertEqual(1, executor.statistics.executed)
self.assertRaises(RuntimeError, executor.submit, returns_one)
executor.restart()
self.assertEqual(0, executor.statistics.executed)
fut = executor.submit(returns_one)
self.assertEqual(1, fut.result())
self.assertEqual(1, executor.statistics.executed)
executor.shutdown()
def test_alive(self):
with self.executor_cls(**self.executor_kwargs) as executor:
self.assertTrue(executor.alive)
self.assertFalse(executor.alive)
def test_done_callback(self):
happy_completed = []
unhappy_completed = []
def on_done(fut):
if fut.exception():
unhappy_completed.append(fut)
else:
happy_completed.append(fut)
for i in range(0, 10):
if i % 2 == 0:
fut = self.executor.submit(returns_one)
else:
fut = self.executor.submit(blows_up)
fut.add_done_callback(on_done)
self.executor.shutdown()
self.assertEqual(10, len(happy_completed) + len(unhappy_completed))
self.assertEqual(5, len(unhappy_completed))
self.assertEqual(5, len(happy_completed))
| 35.678571 | 79 | 0.626226 |
import time
import testscenarios
from testtools import testcase
import futurist
from futurist.tests import base
def returns_one():
return 1
def blows_up():
raise RuntimeError("no worky")
def delayed(wait_secs):
time.sleep(wait_secs)
class TestExecutors(testscenarios.TestWithScenarios, base.TestCase):
scenarios = [
('sync', {'executor_cls': futurist.SynchronousExecutor,
'restartable': True, 'executor_kwargs': {}}),
('green_sync', {'executor_cls': futurist.SynchronousExecutor,
'restartable': True,
'executor_kwargs': {'green': True}}),
('green', {'executor_cls': futurist.GreenThreadPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
('thread', {'executor_cls': futurist.ThreadPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
('process', {'executor_cls': futurist.ProcessPoolExecutor,
'restartable': False, 'executor_kwargs': {}}),
]
def setUp(self):
super(TestExecutors, self).setUp()
self.executor = self.executor_cls(**self.executor_kwargs)
def tearDown(self):
super(TestExecutors, self).tearDown()
self.executor.shutdown()
self.executor = None
def test_run_one(self):
fut = self.executor.submit(returns_one)
self.assertEqual(1, fut.result())
self.assertTrue(fut.done())
def test_blows_up(self):
fut = self.executor.submit(blows_up)
self.assertRaises(RuntimeError, fut.result)
self.assertIsInstance(fut.exception(), RuntimeError)
def test_gather_stats(self):
self.executor.submit(blows_up)
self.executor.submit(delayed, 0.2)
self.executor.submit(returns_one)
self.executor.shutdown()
self.assertEqual(3, self.executor.statistics.executed)
self.assertEqual(1, self.executor.statistics.failures)
self.assertGreaterEqual(self.executor.statistics.runtime,
0.199)
def test_post_shutdown_raises(self):
executor = self.executor_cls(**self.executor_kwargs)
executor.shutdown()
self.assertRaises(RuntimeError, executor.submit, returns_one)
def test_restartable(self):
if not self.restartable:
raise testcase.TestSkipped("not restartable")
else:
executor = self.executor_cls(**self.executor_kwargs)
fut = executor.submit(returns_one)
self.assertEqual(1, fut.result())
executor.shutdown()
self.assertEqual(1, executor.statistics.executed)
self.assertRaises(RuntimeError, executor.submit, returns_one)
executor.restart()
self.assertEqual(0, executor.statistics.executed)
fut = executor.submit(returns_one)
self.assertEqual(1, fut.result())
self.assertEqual(1, executor.statistics.executed)
executor.shutdown()
def test_alive(self):
with self.executor_cls(**self.executor_kwargs) as executor:
self.assertTrue(executor.alive)
self.assertFalse(executor.alive)
def test_done_callback(self):
happy_completed = []
unhappy_completed = []
def on_done(fut):
if fut.exception():
unhappy_completed.append(fut)
else:
happy_completed.append(fut)
for i in range(0, 10):
if i % 2 == 0:
fut = self.executor.submit(returns_one)
else:
fut = self.executor.submit(blows_up)
fut.add_done_callback(on_done)
self.executor.shutdown()
self.assertEqual(10, len(happy_completed) + len(unhappy_completed))
self.assertEqual(5, len(unhappy_completed))
self.assertEqual(5, len(happy_completed))
| true | true |
f7311e8c0c66339dabaf7cf0082947fc52f84663 | 16,658 | py | Python | Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/internal/type_checkers.py | amirpaia/election-campaign-dynamics | b2b32c627cb79c7eb60e458511210308b7ff4035 | [
"CC0-1.0"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/internal/type_checkers.py | amirpaia/election-campaign-dynamics | b2b32c627cb79c7eb60e458511210308b7ff4035 | [
"CC0-1.0"
] | null | null | null | Website/FlaskWebsite/env/Lib/site-packages/google/protobuf/internal/type_checkers.py | amirpaia/election-campaign-dynamics | b2b32c627cb79c7eb60e458511210308b7ff4035 | [
"CC0-1.0"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their
corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import ctypes
import numbers
from google.protobuf.internal import api_implementation
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def TruncateToFourByteFloat(original):
return ctypes.c_float(original).value
def ToShortestFloat(original):
"""Returns the shortest float that has same value in wire."""
# All 4 byte floats have between 6 and 9 significant digits, so we
# start with 6 as the lower bound.
# It has to be iterative because use '.9g' directly can not get rid
# of the noises for most values. For example if set a float_field=0.9
# use '.9g' will print 0.899999976.
precision = 6
rounded = float('{0:.{1}g}'.format(original, precision))
while TruncateToFourByteFloat(rounded) != original:
precision += 1
rounded = float('{0:.{1}g}'.format(original, precision))
return rounded
def SupportsOpenEnums(field_descriptor):
return field_descriptor.containing_type.syntax == "proto3"
def GetTypeChecker(field):
"""Returns a type checker for a message field of the specified types.
Args:
field: FieldDescriptor object for this field.
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
"""
if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field.type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
if SupportsOpenEnums(field):
# When open enums are supported, any int32 can be assigned.
return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
else:
return EnumValueChecker(field.enum_type)
return _VALUE_CHECKERS[field.cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
"""Type checker used to catch type errors as early as possible
when the client is setting scalar fields in protocol messages.
"""
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
"""Type check the provided value and return it.
The returned value might have been normalized to another type.
"""
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
# Some field types(float, double and bool) accept other types, must
# convert to the correct type in such cases.
if self._acceptable_types:
if self._acceptable_types[0] in (bool, float):
return self._acceptable_types[0](proposed_value)
return proposed_value
class TypeCheckerWithDefault(TypeChecker):
def __init__(self, default_value, *acceptable_types):
TypeChecker.__init__(self, *acceptable_types)
self._default_value = default_value
def DefaultValue(self):
return self._default_value
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, numbers.Integral):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int,)))
raise TypeError(message)
if not self._MIN <= int(proposed_value) <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
# We force all values to int to make alternate implementations where the
# distinction is more significant (e.g. the C++ implementation) simpler.
proposed_value = int(proposed_value)
return proposed_value
def DefaultValue(self):
return 0
class EnumValueChecker(object):
"""Checker used for enum fields. Performs type-check and range check."""
def __init__(self, enum_type):
self._enum_type = enum_type
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, numbers.Integral):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int,)))
raise TypeError(message)
if int(proposed_value) not in self._enum_type.values_by_number:
raise ValueError('Unknown enum value: %d' % proposed_value)
return proposed_value
def DefaultValue(self):
return self._enum_type.values[0].number
class UnicodeValueChecker(object):
"""Checker used for string fields.
Always returns a unicode value, even if the input is of type str.
"""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (bytes, str)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (bytes, str)))
raise TypeError(message)
# If the value is of type 'bytes' make sure that it is valid UTF-8 data.
if isinstance(proposed_value, bytes):
try:
proposed_value = proposed_value.decode('utf-8')
except UnicodeDecodeError:
raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 '
'encoding. Non-UTF-8 strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
else:
try:
proposed_value.encode('utf8')
except UnicodeEncodeError:
raise ValueError('%.1024r isn\'t a valid unicode string and '
'can\'t be encoded in UTF-8.'%
(proposed_value))
return proposed_value
def DefaultValue(self):
return u""
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
# The max 4 bytes float is about 3.4028234663852886e+38
_FLOAT_MAX = float.fromhex('0x1.fffffep+127')
_FLOAT_MIN = -_FLOAT_MAX
_INF = float('inf')
_NEG_INF = float('-inf')
class FloatValueChecker(object):
"""Checker used for float fields. Performs type-check and range check.
Values exceeding a 32-bit float will be converted to inf/-inf.
"""
def CheckValue(self, proposed_value):
"""Check and convert proposed_value to float."""
if not isinstance(proposed_value, numbers.Real):
message = ('%.1024r has type %s, but expected one of: numbers.Real' %
(proposed_value, type(proposed_value)))
raise TypeError(message)
converted_value = float(proposed_value)
# This inf rounding matches the C++ proto SafeDoubleToFloat logic.
if converted_value > _FLOAT_MAX:
return _INF
if converted_value < _FLOAT_MIN:
return _NEG_INF
return TruncateToFourByteFloat(converted_value)
def DefaultValue(self):
return 0.0
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeCheckerWithDefault(
0.0, float, numbers.Real),
_FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(),
_FieldDescriptor.CPPTYPE_BOOL: TypeCheckerWithDefault(
False, bool, numbers.Integral),
_FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes),
}
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
| 40.530414 | 81 | 0.742466 |
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = 'robinson@google.com (Will Robinson)'
import ctypes
import numbers
from google.protobuf.internal import api_implementation
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def TruncateToFourByteFloat(original):
return ctypes.c_float(original).value
def ToShortestFloat(original):
# All 4 byte floats have between 6 and 9 significant digits, so we
# start with 6 as the lower bound.
# It has to be iterative because use '.9g' directly can not get rid
# of the noises for most values. For example if set a float_field=0.9
# use '.9g' will print 0.899999976.
precision = 6
rounded = float('{0:.{1}g}'.format(original, precision))
while TruncateToFourByteFloat(rounded) != original:
precision += 1
rounded = float('{0:.{1}g}'.format(original, precision))
return rounded
def SupportsOpenEnums(field_descriptor):
return field_descriptor.containing_type.syntax == "proto3"
def GetTypeChecker(field):
if (field.cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field.type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
if field.cpp_type == _FieldDescriptor.CPPTYPE_ENUM:
if SupportsOpenEnums(field):
# When open enums are supported, any int32 can be assigned.
return _VALUE_CHECKERS[_FieldDescriptor.CPPTYPE_INT32]
else:
return EnumValueChecker(field.enum_type)
return _VALUE_CHECKERS[field.cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
class TypeChecker(object):
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
if self._acceptable_types:
if self._acceptable_types[0] in (bool, float):
return self._acceptable_types[0](proposed_value)
return proposed_value
class TypeCheckerWithDefault(TypeChecker):
def __init__(self, default_value, *acceptable_types):
TypeChecker.__init__(self, *acceptable_types)
self._default_value = default_value
def DefaultValue(self):
return self._default_value
class IntValueChecker(object):
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, numbers.Integral):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int,)))
raise TypeError(message)
if not self._MIN <= int(proposed_value) <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
proposed_value = int(proposed_value)
return proposed_value
def DefaultValue(self):
return 0
class EnumValueChecker(object):
def __init__(self, enum_type):
self._enum_type = enum_type
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, numbers.Integral):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int,)))
raise TypeError(message)
if int(proposed_value) not in self._enum_type.values_by_number:
raise ValueError('Unknown enum value: %d' % proposed_value)
return proposed_value
def DefaultValue(self):
return self._enum_type.values[0].number
class UnicodeValueChecker(object):
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (bytes, str)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (bytes, str)))
raise TypeError(message)
if isinstance(proposed_value, bytes):
try:
proposed_value = proposed_value.decode('utf-8')
except UnicodeDecodeError:
raise ValueError('%.1024r has type bytes, but isn\'t valid UTF-8 '
'encoding. Non-UTF-8 strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
else:
try:
proposed_value.encode('utf8')
except UnicodeEncodeError:
raise ValueError('%.1024r isn\'t a valid unicode string and '
'can\'t be encoded in UTF-8.'%
(proposed_value))
return proposed_value
def DefaultValue(self):
return u""
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
_FLOAT_MAX = float.fromhex('0x1.fffffep+127')
_FLOAT_MIN = -_FLOAT_MAX
_INF = float('inf')
_NEG_INF = float('-inf')
class FloatValueChecker(object):
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, numbers.Real):
message = ('%.1024r has type %s, but expected one of: numbers.Real' %
(proposed_value, type(proposed_value)))
raise TypeError(message)
converted_value = float(proposed_value)
if converted_value > _FLOAT_MAX:
return _INF
if converted_value < _FLOAT_MIN:
return _NEG_INF
return TruncateToFourByteFloat(converted_value)
def DefaultValue(self):
return 0.0
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeCheckerWithDefault(
0.0, float, numbers.Real),
_FieldDescriptor.CPPTYPE_FLOAT: FloatValueChecker(),
_FieldDescriptor.CPPTYPE_BOOL: TypeCheckerWithDefault(
False, bool, numbers.Integral),
_FieldDescriptor.CPPTYPE_STRING: TypeCheckerWithDefault(b'', bytes),
}
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
| true | true |
f7311f4832eeb38d5c97a3363066842f0f43dfd1 | 3,243 | py | Python | youtube_dl/extractor/libsyn.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 24 | 2017-03-17T10:27:12.000Z | 2022-02-16T05:55:50.000Z | youtube_dl/extractor/libsyn.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 7 | 2017-07-26T08:15:27.000Z | 2018-09-20T12:56:53.000Z | youtube_dl/extractor/libsyn.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | [
"Unlicense"
] | 3 | 2017-03-17T10:27:13.000Z | 2019-01-28T01:19:17.000Z | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
)
class LibsynIE(InfoExtractor):
_VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
_TESTS = [{
'url': 'http://html5-player.libsyn.com/embed/episode/id/6385796/',
'md5': '2a55e75496c790cdeb058e7e6c087746',
'info_dict': {
'id': '6385796',
'ext': 'mp3',
'title': "Champion Minded - Developing a Growth Mindset",
'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.',
'upload_date': '20180320',
'thumbnail': 're:^https?://.*',
},
}, {
'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
'md5': '6c5cb21acd622d754d3b1a92b582ce42',
'info_dict': {
'id': '3727166',
'ext': 'mp3',
'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
'upload_date': '20150818',
'thumbnail': 're:^https?://.*',
}
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
url = m.group('mainurl')
webpage = self._download_webpage(url, video_id)
podcast_title = self._search_regex(
r'<h3>([^<]+)</h3>', webpage, 'podcast title', default=None)
if podcast_title:
podcast_title = podcast_title.strip()
episode_title = self._search_regex(
r'(?:<div class="episode-title">|<h4>)([^<]+)</', webpage, 'episode title')
if episode_title:
episode_title = episode_title.strip()
title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
description = self._html_search_regex(
r'<p\s+id="info_text_body">(.+?)</p>', webpage,
'description', default=None)
if description:
# Strip non-breaking and normal spaces
description = description.replace('\u00A0', ' ').strip()
release_date = unified_strdate(self._search_regex(
r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
data_json = self._search_regex(r'var\s+playlistItem\s*=\s*(\{.*?\});\n', webpage, 'JSON data block')
data = json.loads(data_json)
formats = [{
'url': data['media_url'],
'format_id': 'main',
}, {
'url': data['media_url_libsyn'],
'format_id': 'libsyn',
}]
thumbnail = data.get('thumbnail_url')
duration = parse_duration(data.get('duration'))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': release_date,
'duration': duration,
'formats': formats,
}
| 36.852273 | 185 | 0.573235 |
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
unified_strdate,
)
class LibsynIE(InfoExtractor):
_VALID_URL = r'(?P<mainurl>https?://html5-player\.libsyn\.com/embed/episode/id/(?P<id>[0-9]+))'
_TESTS = [{
'url': 'http://html5-player.libsyn.com/embed/episode/id/6385796/',
'md5': '2a55e75496c790cdeb058e7e6c087746',
'info_dict': {
'id': '6385796',
'ext': 'mp3',
'title': "Champion Minded - Developing a Growth Mindset",
'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.',
'upload_date': '20180320',
'thumbnail': 're:^https?://.*',
},
}, {
'url': 'https://html5-player.libsyn.com/embed/episode/id/3727166/height/75/width/200/theme/standard/direction/no/autoplay/no/autonext/no/thumbnail/no/preload/no/no_addthis/no/',
'md5': '6c5cb21acd622d754d3b1a92b582ce42',
'info_dict': {
'id': '3727166',
'ext': 'mp3',
'title': 'Clients From Hell Podcast - How a Sex Toy Company Kickstarted my Freelance Career',
'upload_date': '20150818',
'thumbnail': 're:^https?://.*',
}
}]
def _real_extract(self, url):
m = re.match(self._VALID_URL, url)
video_id = m.group('id')
url = m.group('mainurl')
webpage = self._download_webpage(url, video_id)
podcast_title = self._search_regex(
r'<h3>([^<]+)</h3>', webpage, 'podcast title', default=None)
if podcast_title:
podcast_title = podcast_title.strip()
episode_title = self._search_regex(
r'(?:<div class="episode-title">|<h4>)([^<]+)</', webpage, 'episode title')
if episode_title:
episode_title = episode_title.strip()
title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
description = self._html_search_regex(
r'<p\s+id="info_text_body">(.+?)</p>', webpage,
'description', default=None)
if description:
description = description.replace('\u00A0', ' ').strip()
release_date = unified_strdate(self._search_regex(
r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
data_json = self._search_regex(r'var\s+playlistItem\s*=\s*(\{.*?\});\n', webpage, 'JSON data block')
data = json.loads(data_json)
formats = [{
'url': data['media_url'],
'format_id': 'main',
}, {
'url': data['media_url_libsyn'],
'format_id': 'libsyn',
}]
thumbnail = data.get('thumbnail_url')
duration = parse_duration(data.get('duration'))
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': release_date,
'duration': duration,
'formats': formats,
}
| true | true |
f7311facdc21a3212e9fa9d8178b074821aeaa20 | 1,401 | py | Python | mediagoblin/tools/request.py | stenwt/mediagoblin-quickstart-openshift | 4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2 | [
"CC0-1.0"
] | 1 | 2016-02-10T18:22:42.000Z | 2016-02-10T18:22:42.000Z | mediagoblin/tools/request.py | stenwt/mediagoblin-quickstart-openshift | 4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2 | [
"CC0-1.0"
] | 1 | 2016-04-19T13:03:17.000Z | 2016-04-19T13:03:17.000Z | mediagoblin/tools/request.py | stenwt/mediagoblin-quickstart-openshift | 4a728c4b3b988c59eb9a43ad1ae1ca5edf8bc3c2 | [
"CC0-1.0"
] | null | null | null | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from mediagoblin.db.models import User
_log = logging.getLogger(__name__)
def setup_user_in_request(request):
"""
Examine a request and tack on a request.user parameter if that's
appropriate.
"""
if not request.session.has_key('user_id'):
request.user = None
return
request.user = User.query.get(request.session['user_id'])
if not request.user:
# Something's wrong... this user doesn't exist? Invalidate
# this session.
_log.warn("Killing session for user id %r", request.session['user_id'])
request.session.invalidate()
| 35.025 | 79 | 0.723769 |
import logging
from mediagoblin.db.models import User
_log = logging.getLogger(__name__)
def setup_user_in_request(request):
if not request.session.has_key('user_id'):
request.user = None
return
request.user = User.query.get(request.session['user_id'])
if not request.user:
_log.warn("Killing session for user id %r", request.session['user_id'])
request.session.invalidate()
| true | true |
f731200874a2e61790ea940792017eb5feadab06 | 13,142 | py | Python | survol/lib_export_json.py | rchateauneu/survol | ba66d3ec453b2d9dd3a8dabc6d53f71aa9ba8c78 | [
"BSD-3-Clause"
] | 9 | 2017-10-05T23:36:23.000Z | 2021-08-09T15:40:03.000Z | survol/lib_export_json.py | rchateauneu/survol | ba66d3ec453b2d9dd3a8dabc6d53f71aa9ba8c78 | [
"BSD-3-Clause"
] | 21 | 2018-01-02T09:33:03.000Z | 2018-08-27T11:09:52.000Z | survol/lib_export_json.py | rchateauneu/survol | ba66d3ec453b2d9dd3a8dabc6d53f71aa9ba8c78 | [
"BSD-3-Clause"
] | 4 | 2018-06-23T09:05:45.000Z | 2021-01-22T15:36:50.000Z | import sys
import six
import os
import json
import logging
import lib_kbase
import lib_patterns
import lib_naming
import lib_util
from lib_properties import pc
import lib_exports
# Monotonically increasing index given to each NodeJson instance; reset to
# zero at the start of every rendering (see output_rdf_graph_as_json_d3).
_node_json_number = 0
class NodeJson:
    """This models a node as it will be saved to Json.

    Wraps an rdflib node with the display attributes needed by the D3
    Javascript interface: label, entity class, fill color, tooltip
    information, URL and universal alias.
    """
    # TODO: This creates a useless layer of lookup that could be suppressed.
    def __init__(self,rdf_node):
        global _node_json_number
        subj_str = str(rdf_node)

        # Splits the URI into a human-readable label, the graphic class of
        # the entity and its key-value identifier.
        entity_label, entity_graphic_class, entity_id = lib_naming.ParseEntityUri(
            subj_str, long_display=False, force_entity_ip_addr=None)
        self.m_label = entity_label.strip()
        self.m_class = entity_graphic_class

        array_graph_params = lib_patterns.TypeToGraphParams(self.m_class)

        # "Graphic_shape","Graphic_colorfill","Graphic_colorbg","Graphic_border","Graphic_is_rounded"
        self.m_color = array_graph_params[1]

        # TODO: Display the doc in the module with FromModuleToDoc(importedMod,filDfltText):
        self.m_info_list = [entity_graphic_class]
        self.m_info_dict = dict()
        # Sequential index of this node; later used as its slot in the
        # JSON "nodes" array.
        self.m_index = _node_json_number

        the_survol_url = lib_util.survol_unescape(rdf_node)
        self.m_survol_url = the_survol_url
        self.m_survol_universal_alias = lib_exports.NodeToUniversalAlias(rdf_node)

        _node_json_number += 1 # One more node.
# Only some scripts and urls are exported to Json.
# The most frequent should come first.
# Examples:
# root=http://mymachine:8000/survol
# url=http://mymachine:8000/survol/class_type_all.py?xid=com.
# url=http://mymachine:8000/survol/objtypes.py
# This must be a tuple because str.startswith() accepts a tuple of prefixes.
_urls_for_json = (
    "/entity.py",
    "/entity_wmi.py",
    "/entity_wbem.py",
    "/entity_info_only.py",
    "/objtypes.py",
    "/class_type_all.py",
    "/class_wbem.py",
    "/class_wmi.py",
    # TODO: Maybe pass portal_wbem.py and portal_wmi.py ??
)
def _script_for_json(url):
    """
    Tells whether an URL may appear as a node in the RDF graph displayed
    by the D3 interface to Survol.

    This filters out "see also" URLs which would return another graph:
    it keeps only URLs designating an instance, not URLs returning a graph
    about an instance.  Scripts returning a graph of information about an
    instance are instead shown in the contextual menu of the node.

    Examples of filtered-out URLs:
    http://mymachine:8000/survol/entity_mime.py?xid=CIM_DataFile.Name=C://smh_installer.log&amp;mode=mime:text/plain
    http://mymachine:8000/survol/sources_types/CIM_Directory/file_directory.py?xid=CIM_Directory.Name=C%3A%2F%2Fpkg
    """
    survol_root = lib_util.uriRoot
    if not url.startswith(survol_root):
        # Foreign (non-Survol) URLs are always accepted.
        return True
    # Survol URLs are accepted only for the whitelisted scripts,
    # checked just after the root prefix.
    return url.startswith(_urls_for_json, len(survol_root))
def _write_json_header(buf_json, with_content_length=False):
    """
    This writes to the output a JSON content with the appropriate HTTP header.
    It for example used by the Javascript interface, to get a contextual menu.

    :param buf_json: the JSON document, already serialized to a string.
    :param with_content_length: if True, a Content-Length header is added;
                                only used for error replies (see below).

    What must be avoided: Cross-Origin Request Blocked:
    The Same Origin Policy disallows reading the remote resource at
    http://192.168.0.17/Survol/survol/sources_types/enumerate_CIM_Process.py?xid=.&mode=json.
    (Reason: CORS header 'Access-Control-Allow-Origin' missing)
    https://stackoverflow.com/questions/5027705/error-in-chrome-content-type-is-not-allowed-by-access-control-allow-headers

    The body of the reply is base-64 encoded.
    """
    arr_headers = [
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Methods', 'POST,GET,OPTIONS'),
        ('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept'),
    ]

    # It is difficult to calculate the length because the output is encoded
    # in Base64, which takes more room than JSon. And also, at least on Windows,
    # each line gets an extra character ("\n\r" ?).
    # So it is confusing.
    # The reason for adding the length is: When an error is detected, sometimes a second error
    # comes immediately after the one, even if the thread (or process ?) quits.
    #
    # Also, with Chrome and Android, sometimes it is not happy with the length,
    # even if we checked it. It works without the length, except if this is an error message.
    if with_content_length:
        # Approximation: one extra byte per line for the platform's line ending.
        num_lines = buf_json.count("\n")
        len_buf = len(buf_json) + num_lines
        arr_headers.append(('Content-Length', str(len_buf)))

    lib_util.WrtHeader('application/json', arr_headers)

    # No text conversion.
    lib_util.WrtAsUtf(buf_json)
def write_json_error(message):
    """
    Send an error message to the HTTP client as a JSON document.

    Called only by ErrorMessageHtml when an error is detected and the output
    format is JSON (the D3 Survol interface); the caller exits afterwards.
    The document follows the jsend convention for reporting errors:
    http://labs.omniti.com/labs/jsend
    """
    logging.warning("WriteJsonError message="+message)
    error_document = json.dumps({"status": "error", "message": message}, indent=2)
    # This is the only reply for which a Content-Length header is emitted.
    _write_json_header(error_document, True)
def output_rdf_graph_as_json_d3(page_title, error_msg, parameters, grph):
    """
    Transforms a RDF graph into a JSON document.
    This returns a graph made of Json objects which are suitable for visualisation in the Javascript
    interface to Survol, which is based on D3.

    :param page_title: title of the page, sent as-is in the JSON document.
    :param error_msg: not used by this function; kept for interface symmetry.
    :param parameters: not used by this function; kept for interface symmetry.
    :param grph: the RDF graph, iterable as (subject, predicate, object) triples.
    """
    # Must be reset to zero between several executions, when run by WSGI.
    global _node_json_number
    _node_json_number = 0

    # It contains a cache because the same nodes may appear several times.
    def node_to_json_obj(the_nod):
        try:
            return node_to_json_obj.dictNod2Json[the_nod]
        except KeyError:
            json_obj = NodeJson(the_nod)
            node_to_json_obj.dictNod2Json[the_nod] = json_obj
            return json_obj
    node_to_json_obj.dictNod2Json = dict()

    links = []
    for subj, pred, obj in grph:
        # This applies only to entity.py : In rendering based on Json, scripts are not displayed as nodes,
        # but in hierarchical menus. The node must not appear at all.
        # TODO: Should probably also eliminate pc.property_rdf_data_nolist2 etc ... See lib_client.
        if pred == pc.property_script:
            logging.debug("continue subj=%s obj=%s",subj,obj)
            continue

        # Normal data scripts are not accepted. This should apply only to file_directory.py and file_to_mime.py
        if not _script_for_json(subj):
            continue

        if not _script_for_json(obj):
            continue

        subj_obj = node_to_json_obj(subj)
        subj_id = subj_obj.m_survol_url

        prop_nam = lib_exports.PropToShortPropNam(pred)

        # TODO: BUG: If several nodes for the same properties, only the last one is kept.
        if lib_kbase.IsLink(obj):
            obj_obj = node_to_json_obj(obj)
            obj_id = obj_obj.m_survol_url
            links.extend([{'source': subj_id, 'target': obj_id, 'survol_link_prop': prop_nam}])

            # TODO: Add the name corresponding to the URL, in m_info_dict so that some elements
            # of the tooltip would be clickable. On the other hand, one just need to merge
            # the nodes relative to the object, by right-clicking.
        elif lib_kbase.IsLiteral(obj):
            if pred == pc.property_information:
                try:
                    subj_obj.m_info_list.append(str(obj.value))
                except UnicodeEncodeError:
                    # 'ascii' codec can't encode character u'\xf3' in position 17: ordinal not in range(128)
                    # https://stackoverflow.com/questions/9942594/unicodeencodeerror-ascii-codec-cant-encode-character-u-xa0-in-position-20
                    subj_obj.m_info_list.append(obj.value.encode('utf-8'))
            else:
                if isinstance(obj.value, six.integer_types) or isinstance(obj.value, six.string_types):
                    subj_obj.m_info_dict[prop_nam] = obj.value
                else:
                    # If the value cannot be serializable to JSON.
                    subj_obj.m_info_dict[prop_nam] = type(obj.value).__name__
        else:
            raise Exception(__file__ + " Cannot happen here")

    # Now, this creates the nodes sent as json objects.
    num_nodes = len(node_to_json_obj.dictNod2Json)
    # Pre-sized list: each node is stored at the index allocated in NodeJson.m_index.
    nodes = [None] * num_nodes
    for nod in node_to_json_obj.dictNod2Json:
        nod_obj = node_to_json_obj.dictNod2Json[nod]
        nod_titl = nod_obj.m_label
        nod_id = nod_obj.m_index
        # The URL must not contain any HTML entities when in a XML or SVG document,
        # and therefore must be escaped. Therefore they have to be unescaped when transmitted in JSON.
        # This is especially needed for RabbitMQ because the parameter defining its connection name
        # has the form: "Url=LOCALHOST:12345,Connection=127.0.0.1:51748 -> 127.0.0.1:5672"
        # HTTP_MIME_URL
        the_survol_nam = lib_util.survol_unescape(nod_titl) # MUST UNESCAPE HTML ENTITIES !
        # TODO: Use the same object for lookup and Json.
        nodes[nod_id] = {
            'id' : nod_obj.m_survol_url, # Required by D3
            'name' : the_survol_nam,
            # Theoretically, this URL should be HTML unescaped then CGI escaped.
            'survol_url' : nod_obj.m_survol_url, # Duplicate of 'id'
            'survol_universal_alias' : nod_obj.m_survol_universal_alias,
            'survol_fill' : nod_obj.m_color,
            'entity_class' : nod_obj.m_class, # TODO: Maybe not needed because also in the URL ?
            'survol_info_list' : nod_obj.m_info_list,
            'survol_info_dict' : nod_obj.m_info_dict
        }

    # This is the graph displayed by D3.
    graph = {
        "page_title": page_title,
        "nodes": nodes,
        "links": links}

    _write_json_header(json.dumps(graph, indent=2))
def output_rdf_graph_as_json_menu(page_title, error_msg, parameters, grph):
    """
    This returns a tree of scripts, usable as the contextual menu of a node displayed
    in the D3 Javascript interface to Survol.
    The RDF content is already created, so this keeps only the nodes related to scripts.

    :param page_title: not used by this function; kept for interface symmetry.
    :param error_msg: not used by this function; kept for interface symmetry.
    :param parameters: not used by this function; kept for interface symmetry.
    :param grph: the RDF graph, iterable as (subject, predicate, object) triples.

    TODO: It would be faster to keep only the tree of scripts. The script "entity.py"
    should have a different output when mode=json.
    It does not return a network but a tree to be displayed in a contextual menu.

    It has a completely different layout as a normal RDF transformed into JSON,
    so probably the URL should be different as well.
    Input example: "http://127.0.0.1:8000/survol/entity.py?xid=CIM_Process.Handle=3812&mode=json"
    """
    # TODO: Should add WBEM and WMI ?

    # For each node, the subscripts. Therefore it can only be a directory.
    nodes_to_items = {}

    # Nodes of scripts which have a parent.
    nodes_with_parent = set()

    # Later used to calculate the list of scripts which do not have a parent
    # directory: They will be displayed at the top of the contextual menu.
    subject_nodes = set()

    # The name of each node.
    nodes_to_names = dict()

    for subj, pred, obj in grph:
        if pred == pc.property_script:
            try:
                nodes_to_items[subj].append(obj)
            except KeyError:
                nodes_to_items[subj] = [obj]

            if lib_kbase.IsLiteral(obj):
                # This is the name of a subdirectory containing scripts.
                nodes_to_names[obj] = obj

            nodes_with_parent.add(obj)
            subject_nodes.add(subj)
        elif pred == pc.property_information:
            if lib_kbase.IsLiteral(obj):
                nodes_to_names[subj] = obj.value
            else:
                raise Exception("Cannot happen here also")
        else:
            # Other predicates are irrelevant for the menu.
            pass

    # Scripts which are nobody's child are the menu's top-level entries.
    top_level_nodes = subject_nodes - nodes_with_parent

    # The output result must be sorted.
    def add_stuff(the_nod_list, depth=0):
        # Recursively builds the JSON subtree for the given list of nodes.
        list_json_items = {}
        for one_rdf_nod in the_nod_list:
            one_json_nod = {
                "name": nodes_to_names.get(one_rdf_nod, "No name"),
                "url": one_rdf_nod}
            # This should be the sort key.
            # Maybe it does not have subitems.
            try:
                lst_item = nodes_to_items[one_rdf_nod]
                one_json_nod["items"] = add_stuff(lst_item, depth+1)
            except KeyError:
                pass
            list_json_items[one_rdf_nod] = one_json_nod
        return list_json_items

    menu_json = add_stuff(top_level_nodes)

    # There is only one top-level element.
    one_menu_val = {}
    for one_menu_key in menu_json:
        one_menu_val = menu_json[one_menu_key]["items"]
        break

    # Writes the content to the HTTP client.
    _write_json_header(json.dumps(one_menu_val, sort_keys=True, indent=2))
| 39.465465 | 139 | 0.666489 | import sys
import six
import os
import json
import logging
import lib_kbase
import lib_patterns
import lib_naming
import lib_util
from lib_properties import pc
import lib_exports
_node_json_number = 0
class NodeJson:
def __init__(self,rdf_node):
global _node_json_number
subj_str = str(rdf_node)
entity_label, entity_graphic_class, entity_id = lib_naming.ParseEntityUri(
subj_str, long_display=False, force_entity_ip_addr=None)
self.m_label = entity_label.strip()
self.m_class = entity_graphic_class
array_graph_params = lib_patterns.TypeToGraphParams(self.m_class)
self.m_color = array_graph_params[1]
self.m_info_list = [entity_graphic_class]
self.m_info_dict = dict()
self.m_index = _node_json_number
the_survol_url = lib_util.survol_unescape(rdf_node)
self.m_survol_url = the_survol_url
self.m_survol_universal_alias = lib_exports.NodeToUniversalAlias(rdf_node)
_node_json_number += 1
_urls_for_json = (
"/entity.py",
"/entity_wmi.py",
"/entity_wbem.py",
"/entity_info_only.py",
"/objtypes.py",
"/class_type_all.py",
"/class_wbem.py",
"/class_wmi.py",
)
def _script_for_json(url):
if url.startswith(lib_util.uriRoot):
idx_script = len(lib_util.uriRoot)
return url.startswith(_urls_for_json, idx_script)
return True
def _write_json_header(buf_json, with_content_length=False):
arr_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Methods', 'POST,GET,OPTIONS'),
('Access-Control-Allow-Headers', 'Origin, X-Requested-With, Content-Type, Accept'),
]
if with_content_length:
num_lines = buf_json.count("\n")
len_buf = len(buf_json) + num_lines
arr_headers.append(('Content-Length', str(len_buf)))
lib_util.WrtHeader('application/json', arr_headers)
lib_util.WrtAsUtf(buf_json)
def write_json_error(message):
logging.warning("WriteJsonError message="+message)
json_err = {"status": "error", "message": message}
_write_json_header(json.dumps(json_err, indent=2), True)
def output_rdf_graph_as_json_d3(page_title, error_msg, parameters, grph):
global _node_json_number
_node_json_number = 0
def node_to_json_obj(the_nod):
try:
return node_to_json_obj.dictNod2Json[the_nod]
except KeyError:
json_obj = NodeJson(the_nod)
node_to_json_obj.dictNod2Json[the_nod] = json_obj
return json_obj
node_to_json_obj.dictNod2Json = dict()
links = []
for subj, pred, obj in grph:
if pred == pc.property_script:
logging.debug("continue subj=%s obj=%s",subj,obj)
continue
if not _script_for_json(subj):
continue
if not _script_for_json(obj):
continue
subj_obj = node_to_json_obj(subj)
subj_id = subj_obj.m_survol_url
prop_nam = lib_exports.PropToShortPropNam(pred)
if lib_kbase.IsLink(obj):
obj_obj = node_to_json_obj(obj)
obj_id = obj_obj.m_survol_url
links.extend([{'source': subj_id, 'target': obj_id, 'survol_link_prop': prop_nam}])
elif lib_kbase.IsLiteral(obj):
if pred == pc.property_information:
try:
subj_obj.m_info_list.append(str(obj.value))
except UnicodeEncodeError:
# https://stackoverflow.com/questions/9942594/unicodeencodeerror-ascii-codec-cant-encode-character-u-xa0-in-position-20
subj_obj.m_info_list.append(obj.value.encode('utf-8'))
else:
if isinstance(obj.value, six.integer_types) or isinstance(obj.value, six.string_types):
subj_obj.m_info_dict[prop_nam] = obj.value
else:
# If the value cannot be serializable to JSON.
subj_obj.m_info_dict[prop_nam] = type(obj.value).__name__
else:
raise Exception(__file__ + " Cannot happen here")
# Now, this creates the nodes sent as json objects.
num_nodes = len(node_to_json_obj.dictNod2Json)
nodes = [None] * num_nodes
for nod in node_to_json_obj.dictNod2Json:
nod_obj = node_to_json_obj.dictNod2Json[nod]
nod_titl = nod_obj.m_label
nod_id = nod_obj.m_index
# The URL must not contain any HTML entities when in a XML or SVG document,
# and therefore must be escaped. Therefore they have to be unescaped when transmitted in JSON.
# This is especially needed for RabbitMQ because the parameter defining its connection name
# has the form: "Url=LOCALHOST:12345,Connection=127.0.0.1:51748 -> 127.0.0.1:5672"
# HTTP_MIME_URL
the_survol_nam = lib_util.survol_unescape(nod_titl) # MUST UNESCAPE HTML ENTITIES !
# TODO: Use the same object for lookup and Json.
nodes[nod_id] = {
'id' : nod_obj.m_survol_url, # Required by D3
'name' : the_survol_nam,
# Theoretically, this URL should be HTML unescaped then CGI escaped.
'survol_url' : nod_obj.m_survol_url, # Duplicate of 'id'
'survol_universal_alias' : nod_obj.m_survol_universal_alias,
'survol_fill' : nod_obj.m_color,
'entity_class' : nod_obj.m_class, # TODO: Maybe not needed because also in the URL ?
'survol_info_list' : nod_obj.m_info_list,
'survol_info_dict' : nod_obj.m_info_dict
}
# This is the graph displayed by D3.
graph = {
"page_title": page_title,
"nodes": nodes,
"links": links}
_write_json_header(json.dumps(graph, indent=2))
def output_rdf_graph_as_json_menu(page_title, error_msg, parameters, grph):
# TODO: Should add WBEM and WMI ?
# For each node, the subscripts. Therefore it can only be a directory.
nodes_to_items = {}
# Nodes of scripts which have a parent.
nodes_with_parent = set()
# Later used to calculate the list of scripts which do not have a parent
# directory: They will be displayed at the top of the contextual menu.
subject_nodes = set()
# The name of each node.
nodes_to_names = dict()
for subj, pred, obj in grph:
if pred == pc.property_script:
try:
nodes_to_items[subj].append(obj)
except KeyError:
nodes_to_items[subj] = [obj]
if lib_kbase.IsLiteral(obj):
# This is the name of a subdirectory containing scripts.
nodes_to_names[obj] = obj
nodes_with_parent.add(obj)
subject_nodes.add(subj)
elif pred == pc.property_information:
if lib_kbase.IsLiteral(obj):
nodes_to_names[subj] = obj.value
else:
raise Exception("Cannot happen here also")
else:
pass
top_level_nodes = subject_nodes - nodes_with_parent
# The output result must be sorted.
def add_stuff(the_nod_list, depth=0):
list_json_items = {}
for one_rdf_nod in the_nod_list:
one_json_nod = {
"name": nodes_to_names.get(one_rdf_nod, "No name"),
"url": one_rdf_nod}
# This should be the sort key.
# Maybe it does not have subitems.
try:
lst_item = nodes_to_items[one_rdf_nod]
one_json_nod["items"] = add_stuff(lst_item, depth+1)
except KeyError:
pass
list_json_items[one_rdf_nod] = one_json_nod
return list_json_items
menu_json = add_stuff(top_level_nodes)
# There is only one top-level element.
one_menu_val = {}
for one_menu_key in menu_json:
one_menu_val = menu_json[one_menu_key]["items"]
break
# Writes the content to the HTTP client.
_write_json_header(json.dumps(one_menu_val, sort_keys=True, indent=2))
| true | true |
f73121b08999df30d985f558b20f79d8fc680663 | 8,052 | py | Python | ietf/group/tests_js.py | hassanakbar4/ietfdb-git | b899ee18604e878fb4133ef38cfeb6af781ce116 | [
"BSD-3-Clause"
] | null | null | null | ietf/group/tests_js.py | hassanakbar4/ietfdb-git | b899ee18604e878fb4133ef38cfeb6af781ce116 | [
"BSD-3-Clause"
] | null | null | null | ietf/group/tests_js.py | hassanakbar4/ietfdb-git | b899ee18604e878fb4133ef38cfeb6af781ce116 | [
"BSD-3-Clause"
] | 1 | 2021-10-05T12:49:27.000Z | 2021-10-05T12:49:27.000Z | # Copyright The IETF Trust 2021, All Rights Reserved
# -*- coding: utf-8 -*-
import datetime
import debug # pyflakes:ignore
from ietf.doc.factories import WgDraftFactory
from ietf.group.factories import GroupFactory, RoleFactory, DatedGroupMilestoneFactory
from ietf.utils.jstest import IetfSeleniumTestCase, ifSeleniumEnabled, selenium_enabled
if selenium_enabled():
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
@ifSeleniumEnabled
class MilestoneTests(IetfSeleniumTestCase):
    """Selenium tests of the group milestone editing page.

    Each test drives a real browser through the "edit milestones" form of a
    freshly created group and then checks the resulting GroupMilestone
    database records.
    """
    def setUp(self):
        super(MilestoneTests, self).setUp()
        # Short explicit wait used by all helpers below.
        self.wait = WebDriverWait(self.driver, 2)
        self.group = GroupFactory()
        self.chair = RoleFactory(group=self.group, name_id='chair').person

    def _search_draft_and_locate_result(self, draft_input, search_string, draft):
        """Search for a draft and get the search result element.

        Types search_string into the select2 search box, waits for the
        result list to show draft.name, and returns the single matching
        result element (asserts exactly one match).
        """
        draft_input.send_keys(search_string)

        result_selector = 'ul.select2-results > li > div.select2-result-label'
        self.wait.until(
            expected_conditions.text_to_be_present_in_element(
                (By.CSS_SELECTOR, result_selector),
                draft.name
            ))
        results = self.driver.find_elements_by_css_selector(result_selector)
        matching_results = [r for r in results if draft.name in r.text]
        self.assertEqual(len(matching_results), 1)
        return matching_results[0]

    def _click_milestone_submit_button(self, label):
        """Click the milestones-form submit button, asserting its label.

        The same button is used for both the 'Review' and 'Save' steps;
        label distinguishes which step is expected.
        """
        submit_button_selector = 'form#milestones-form button[type="submit"]'
        submit_button = self.wait.until(
            expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, submit_button_selector))
        )
        self.assertIn(label, submit_button.text)
        self.scroll_to_element(submit_button)
        submit_button.click()

    def _assert_milestone_changed(self):
        """Wait for milestone to be marked as changed and assert that this succeeded"""
        milestone_selector = 'form#milestones-form .milestone'
        try:
            found_expected_text = self.wait.until(
                expected_conditions.text_to_be_present_in_element(
                    (By.CSS_SELECTOR, milestone_selector),
                    'Changed'
                )
            )
        except TimeoutException:
            found_expected_text = False
        self.assertTrue(found_expected_text, 'Milestone never marked as "changed"')
        return self.driver.find_element_by_css_selector(milestone_selector)

    def test_add_milestone(self):
        """Add a new milestone through the browser and verify the DB record."""
        draft = WgDraftFactory()
        WgDraftFactory.create_batch(3)  # some drafts to ignore
        description = 'some description'
        due_date = datetime.date.today() + datetime.timedelta(days=60)
        assert(len(draft.name) > 5)
        draft_search_string = draft.name[-5:]

        self.login(self.chair.user.username)
        url = self.absreverse('ietf.group.milestones.edit_milestones;current',
                              kwargs=dict(acronym=self.group.acronym))
        self.driver.get(url)

        add_milestone_button = self.wait.until(
            expected_conditions.element_to_be_clickable(
                (By.CSS_SELECTOR, 'button.add-milestone')
            ))
        self.scroll_to_element(add_milestone_button)
        add_milestone_button.click()

        edit_div = self.wait.until(
            expected_conditions.visibility_of_element_located(
                (By.CSS_SELECTOR, 'form#milestones-form div.edit-milestone')
            ))

        desc_input = edit_div.find_element_by_css_selector('input[id$="_desc"]')
        due_input = edit_div.find_element_by_css_selector('input[id$="_due"]')
        draft_input = edit_div.find_element_by_css_selector(
            'div.select2-container[id$="id_docs"] input.select2-input'
        )

        # fill in the edit milestone form
        desc_input.send_keys(description)
        due_input.send_keys(due_date.strftime('%m %Y\n'))  # \n closes the date selector
        self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()

        self._click_milestone_submit_button('Review')

        result_row = self._assert_milestone_changed()
        self.assertIn(description, result_row.text)

        self._click_milestone_submit_button('Save')

        # Wait for page to return to group page
        self.wait.until(
            expected_conditions.text_to_be_present_in_element(
                (By.CSS_SELECTOR, 'div#content h1'),
                self.group.name
            )
        )
        self.assertIn('1 new milestone', self.driver.page_source)

        # Verify the milestone was actually persisted with the entered values.
        self.assertEqual(self.group.groupmilestone_set.count(), 1)
        gms = self.group.groupmilestone_set.first()
        self.assertEqual(gms.desc, description)
        self.assertEqual(gms.due.strftime('%m %Y'), due_date.strftime('%m %Y'))
        self.assertEqual(list(gms.docs.all()), [draft])

    def test_edit_milestone(self):
        """Edit an existing milestone's due date and drafts, then verify the DB record."""
        milestone = DatedGroupMilestoneFactory(group=self.group)
        draft = WgDraftFactory()
        WgDraftFactory.create_batch(3)  # some drafts to ignore
        assert(len(draft.name) > 5)
        draft_search_string = draft.name[-5:]

        url = self.absreverse('ietf.group.milestones.edit_milestones;current',
                              kwargs=dict(acronym=self.group.acronym))
        self.login(self.chair.user.username)
        self.driver.get(url)

        # should only be one milestone row - test will fail later if we somehow get the wrong one
        edit_element = self.wait.until(
            expected_conditions.element_to_be_clickable(
                (By.CSS_SELECTOR, 'form#milestones-form div.milestonerow')
            )
        )
        edit_element.click()

        # find the description field corresponding to our milestone
        desc_field = self.wait.until(
            expected_conditions.visibility_of_element_located(
                (By.CSS_SELECTOR, 'input[value="%s"]' % milestone.desc)
            )
        )

        # Get the prefix used to identify inputs related to this milestone
        prefix = desc_field.get_attribute('id')[:-4]  # -4 to strip off 'desc', leave '-'
        due_field = self.driver.find_element_by_id(prefix + 'due')
        hidden_drafts_field = self.driver.find_element_by_id(prefix + 'docs')
        draft_input = self.driver.find_element_by_css_selector(
            'div.select2-container[id*="%s"] input.select2-input' % prefix
        )

        # Sanity-check the form is pre-filled with the milestone's current data.
        self.assertEqual(due_field.get_attribute('value'), milestone.due.strftime('%B %Y'))
        self.assertEqual(hidden_drafts_field.get_attribute('value'),
                         ','.join([str(doc.pk) for doc in milestone.docs.all()]))

        # modify the fields
        new_due_date = (milestone.due + datetime.timedelta(days=31)).strftime('%m %Y')
        due_field.clear()
        due_field.send_keys(new_due_date + '\n')
        self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()

        self._click_milestone_submit_button('Review')
        self._assert_milestone_changed()
        self._click_milestone_submit_button('Save')

        # Wait for page to return to group page
        self.wait.until(
            expected_conditions.text_to_be_present_in_element(
                (By.CSS_SELECTOR, 'div#content h1'),
                self.group.name
            )
        )
        expected_desc = milestone.desc
        expected_due_date = new_due_date
        expected_docs = [draft]
        self.assertEqual(self.group.groupmilestone_set.count(), 1)
        gms = self.group.groupmilestone_set.first()
        self.assertEqual(gms.desc, expected_desc)
        self.assertEqual(gms.due.strftime('%m %Y'), expected_due_date)
        self.assertCountEqual(expected_docs, gms.docs.all())
import datetime
import debug
from ietf.doc.factories import WgDraftFactory
from ietf.group.factories import GroupFactory, RoleFactory, DatedGroupMilestoneFactory
from ietf.utils.jstest import IetfSeleniumTestCase, ifSeleniumEnabled, selenium_enabled
if selenium_enabled():
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
@ifSeleniumEnabled
class MilestoneTests(IetfSeleniumTestCase):
def setUp(self):
super(MilestoneTests, self).setUp()
self.wait = WebDriverWait(self.driver, 2)
self.group = GroupFactory()
self.chair = RoleFactory(group=self.group, name_id='chair').person
def _search_draft_and_locate_result(self, draft_input, search_string, draft):
draft_input.send_keys(search_string)
result_selector = 'ul.select2-results > li > div.select2-result-label'
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, result_selector),
draft.name
))
results = self.driver.find_elements_by_css_selector(result_selector)
matching_results = [r for r in results if draft.name in r.text]
self.assertEqual(len(matching_results), 1)
return matching_results[0]
def _click_milestone_submit_button(self, label):
submit_button_selector = 'form#milestones-form button[type="submit"]'
submit_button = self.wait.until(
expected_conditions.element_to_be_clickable((By.CSS_SELECTOR, submit_button_selector))
)
self.assertIn(label, submit_button.text)
self.scroll_to_element(submit_button)
submit_button.click()
def _assert_milestone_changed(self):
milestone_selector = 'form#milestones-form .milestone'
try:
found_expected_text = self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, milestone_selector),
'Changed'
)
)
except TimeoutException:
found_expected_text = False
self.assertTrue(found_expected_text, 'Milestone never marked as "changed"')
return self.driver.find_element_by_css_selector(milestone_selector)
def test_add_milestone(self):
draft = WgDraftFactory()
WgDraftFactory.create_batch(3)
description = 'some description'
due_date = datetime.date.today() + datetime.timedelta(days=60)
assert(len(draft.name) > 5)
draft_search_string = draft.name[-5:]
self.login(self.chair.user.username)
url = self.absreverse('ietf.group.milestones.edit_milestones;current',
kwargs=dict(acronym=self.group.acronym))
self.driver.get(url)
add_milestone_button = self.wait.until(
expected_conditions.element_to_be_clickable(
(By.CSS_SELECTOR, 'button.add-milestone')
))
self.scroll_to_element(add_milestone_button)
add_milestone_button.click()
edit_div = self.wait.until(
expected_conditions.visibility_of_element_located(
(By.CSS_SELECTOR, 'form#milestones-form div.edit-milestone')
))
desc_input = edit_div.find_element_by_css_selector('input[id$="_desc"]')
due_input = edit_div.find_element_by_css_selector('input[id$="_due"]')
draft_input = edit_div.find_element_by_css_selector(
'div.select2-container[id$="id_docs"] input.select2-input'
)
desc_input.send_keys(description)
due_input.send_keys(due_date.strftime('%m %Y\n'))
self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()
self._click_milestone_submit_button('Review')
result_row = self._assert_milestone_changed()
self.assertIn(description, result_row.text)
self._click_milestone_submit_button('Save')
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, 'div#content h1'),
self.group.name
)
)
self.assertIn('1 new milestone', self.driver.page_source)
self.assertEqual(self.group.groupmilestone_set.count(), 1)
gms = self.group.groupmilestone_set.first()
self.assertEqual(gms.desc, description)
self.assertEqual(gms.due.strftime('%m %Y'), due_date.strftime('%m %Y'))
self.assertEqual(list(gms.docs.all()), [draft])
def test_edit_milestone(self):
milestone = DatedGroupMilestoneFactory(group=self.group)
draft = WgDraftFactory()
WgDraftFactory.create_batch(3)
assert(len(draft.name) > 5)
draft_search_string = draft.name[-5:]
url = self.absreverse('ietf.group.milestones.edit_milestones;current',
kwargs=dict(acronym=self.group.acronym))
self.login(self.chair.user.username)
self.driver.get(url)
edit_element = self.wait.until(
expected_conditions.element_to_be_clickable(
(By.CSS_SELECTOR, 'form#milestones-form div.milestonerow')
)
)
edit_element.click()
desc_field = self.wait.until(
expected_conditions.visibility_of_element_located(
(By.CSS_SELECTOR, 'input[value="%s"]' % milestone.desc)
)
)
prefix = desc_field.get_attribute('id')[:-4]
due_field = self.driver.find_element_by_id(prefix + 'due')
hidden_drafts_field = self.driver.find_element_by_id(prefix + 'docs')
draft_input = self.driver.find_element_by_css_selector(
'div.select2-container[id*="%s"] input.select2-input' % prefix
)
self.assertEqual(due_field.get_attribute('value'), milestone.due.strftime('%B %Y'))
self.assertEqual(hidden_drafts_field.get_attribute('value'),
','.join([str(doc.pk) for doc in milestone.docs.all()]))
new_due_date = (milestone.due + datetime.timedelta(days=31)).strftime('%m %Y')
due_field.clear()
due_field.send_keys(new_due_date + '\n')
self._search_draft_and_locate_result(draft_input, draft_search_string, draft).click()
self._click_milestone_submit_button('Review')
self._assert_milestone_changed()
self._click_milestone_submit_button('Save')
self.wait.until(
expected_conditions.text_to_be_present_in_element(
(By.CSS_SELECTOR, 'div#content h1'),
self.group.name
)
)
expected_desc = milestone.desc
expected_due_date = new_due_date
expected_docs = [draft]
self.assertEqual(self.group.groupmilestone_set.count(), 1)
gms = self.group.groupmilestone_set.first()
self.assertEqual(gms.desc, expected_desc)
self.assertEqual(gms.due.strftime('%m %Y'), expected_due_date)
self.assertCountEqual(expected_docs, gms.docs.all())
| true | true |
f73122345854561a45917cbc6b8d22293f08b06b | 1,681 | py | Python | src/pygetwindow/__init__.py | EMOholcicka/PyGetWindow | 55743692fadd5faca330f1d5f9aa1b4ade20d786 | [
"BSD-3-Clause"
] | 1 | 2018-12-18T15:15:21.000Z | 2018-12-18T15:15:21.000Z | src/pygetwindow/__init__.py | EMOholcicka/PyGetWindow | 55743692fadd5faca330f1d5f9aa1b4ade20d786 | [
"BSD-3-Clause"
] | 3 | 2019-01-17T01:55:16.000Z | 2019-02-21T16:27:35.000Z | src/pygetwindow/__init__.py | EMOholcicka/PyGetWindow | 55743692fadd5faca330f1d5f9aa1b4ade20d786 | [
"BSD-3-Clause"
] | 1 | 2019-01-16T21:51:08.000Z | 2019-01-16T21:51:08.000Z | # PyGetWindow
# A cross-platform module to find information about the windows on the screen.
"""
# Work in progress
# Useful info:
#https://stackoverflow.com/questions/373020/finding-the-current-active-window-in-mac-os-x-using-python
#https://stackoverflow.com/questions/7142342/get-window-position-size-with-python
win32 api and ctypes on Windows
cocoa api and pyobjc on Mac
Xlib on linux
Possible Future Features:
get/click menu (win32: GetMenuItemCount, GetMenuItemInfo, GetMenuItemID, GetMenu, GetMenuItemRect)
"""
__version__ = '0.0.4'
import sys
import collections
class PyGetWindowException(Exception):
    """Base exception class raised for errors specific to PyGetWindow."""
    pass
def pointInRect(x, y, left, top, width, height):
    """Return True if (x, y) lies strictly inside the given rectangle.

    Points on the rectangle's edges are considered outside.
    """
    within_x = left < x < left + width
    within_y = top < y < top + height
    return within_x and within_y
# Platform dispatch: pick the backend implementation at import time.
# Note that importing this package on an unsupported platform raises
# NotImplementedError immediately.
if sys.platform == 'darwin':
    raise NotImplementedError('PyGetWindow currently does not support macOS. If you have Appkit/Cocoa knowledge, please contribute! https://github.com/asweigart/pygetwindow') # TODO - implement mac
elif sys.platform == 'win32':
    # Windows backend: re-export the public API from the win32 module.
    from ._pygetwindow_win import Win32Window, getActiveWindow, getWindowsAt, getWindowsWithTitle, getAllWindows, getAllTitles
    Window = Win32Window
else:
    raise NotImplementedError('PyGetWindow currently does not support Linux. If you have Xlib knowledge, please contribute! https://github.com/asweigart/pygetwindow')

# NOTE: `Rect` is a named tuple for use in Python, while structs.RECT represents
# the win32 RECT struct. PyRect's Rect class is used for handling changing
# geometry of rectangular areas.
Rect = collections.namedtuple('Rect', 'left top right bottom')
Point = collections.namedtuple('Point', 'x y')
Size = collections.namedtuple('Size', 'width height')
__version__ = '0.0.4'
import sys
import collections
class PyGetWindowException(Exception):
pass
def pointInRect(x, y, left, top, width, height):
return left < x < left + width and top < y < top + height
if sys.platform == 'darwin':
raise NotImplementedError('PyGetWindow currently does not support macOS. If you have Appkit/Cocoa knowledge, please contribute! https://github.com/asweigart/pygetwindow')
elif sys.platform == 'win32':
from ._pygetwindow_win import Win32Window, getActiveWindow, getWindowsAt, getWindowsWithTitle, getAllWindows, getAllTitles
Window = Win32Window
else:
raise NotImplementedError('PyGetWindow currently does not support Linux. If you have Xlib knowledge, please contribute! https://github.com/asweigart/pygetwindow')
# geometry of rectangular areas.
Rect = collections.namedtuple('Rect', 'left top right bottom')
Point = collections.namedtuple('Point', 'x y')
Size = collections.namedtuple('Size', 'width height') | true | true |
f731233587b1356960a9308c19c84399ba7b1cad | 8,353 | py | Python | manuscript/migrations/0007_auto__add_field_title_num_volumes__add_field_title_editor__add_field_t.py | adamsc64/django-manuscript | 9d17a8a93ddaa789a269dc5683b78f2be84778c1 | [
"MIT"
] | 1 | 2015-06-18T07:21:51.000Z | 2015-06-18T07:21:51.000Z | manuscript/migrations/0007_auto__add_field_title_num_volumes__add_field_title_editor__add_field_t.py | adamsc64/django-manuscript | 9d17a8a93ddaa789a269dc5683b78f2be84778c1 | [
"MIT"
] | null | null | null | manuscript/migrations/0007_auto__add_field_title_num_volumes__add_field_title_editor__add_field_t.py | adamsc64/django-manuscript | 9d17a8a93ddaa789a269dc5683b78f2be84778c1 | [
"MIT"
] | 1 | 2021-11-23T09:21:31.000Z | 2021-11-23T09:21:31.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add publication-metadata fields to the ``Title`` model.

    South schema migration: introduces ``num_volumes``, ``editor``,
    ``publisher``, ``place_of_publication``, ``title_page``,
    ``copyright_page`` and ``original_publication_title`` on the
    ``manuscript_title`` table.  The ``models`` dict below is South's
    frozen snapshot of the app's models at the time of this migration.
    """

    def forwards(self, orm):
        """Apply the migration: add the new ``Title`` columns."""
        # Adding field 'Title.num_volumes'
        db.add_column('manuscript_title', 'num_volumes', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
        # Adding field 'Title.editor'
        db.add_column('manuscript_title', 'editor', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        # Adding field 'Title.publisher'
        db.add_column('manuscript_title', 'publisher', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        # Adding field 'Title.place_of_publication'
        db.add_column('manuscript_title', 'place_of_publication', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
        # Adding field 'Title.title_page'
        db.add_column('manuscript_title', 'title_page', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='title_page_of', unique=True, null=True, to=orm['manuscript.Page']), keep_default=False)
        # Adding field 'Title.copyright_page'
        db.add_column('manuscript_title', 'copyright_page', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='copyright_page_of', unique=True, null=True, to=orm['manuscript.Page']), keep_default=False)
        # Adding field 'Title.original_publication_title'
        db.add_column('manuscript_title', 'original_publication_title', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['manuscript.Title'], unique=True, null=True, blank=True), keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the columns added in :meth:`forwards`."""
        # Deleting field 'Title.num_volumes'
        db.delete_column('manuscript_title', 'num_volumes')
        # Deleting field 'Title.editor'
        db.delete_column('manuscript_title', 'editor')
        # Deleting field 'Title.publisher'
        db.delete_column('manuscript_title', 'publisher')
        # Deleting field 'Title.place_of_publication'
        db.delete_column('manuscript_title', 'place_of_publication')
        # Deleting field 'Title.title_page'
        db.delete_column('manuscript_title', 'title_page_id')
        # Deleting field 'Title.copyright_page'
        db.delete_column('manuscript_title', 'copyright_page_id')
        # Deleting field 'Title.original_publication_title'
        db.delete_column('manuscript_title', 'original_publication_title_id')

    # Frozen model definitions (auto-generated by South) -- do not edit by hand.
    models = {
        'manuscript.author': {
            'Meta': {'object_name': 'Author'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        'manuscript.chapter': {
            'Meta': {'object_name': 'Chapter'},
            'heading': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '70', 'blank': 'True'}),
            'start_page_no': ('django.db.models.fields.IntegerField', [], {}),
            'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Title']"}),
            'xml_chapter_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'})
        },
        'manuscript.compositeparagraph': {
            'Meta': {'object_name': 'CompositeParagraph'},
            'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Chapter']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'pages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['manuscript.Page']", 'symmetrical': 'False'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'manuscript.page': {
            'Meta': {'unique_together': "(('title', 'number'),)", 'object_name': 'Page'},
            'display': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'scan': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Title']"})
        },
        'manuscript.paragraph': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'Paragraph'},
            '_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Chapter']"}),
            'composite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.CompositeParagraph']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'old_page_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Page']"}),
            'split': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        'manuscript.sitecopytext': {
            'Meta': {'object_name': 'SiteCopyText'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'index': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'value': ('django.db.models.fields.TextField', [], {'default': "''"})
        },
        'manuscript.title': {
            'Meta': {'object_name': 'Title'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Author']"}),
            'copyright_page': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'copyright_page_of'", 'unique': 'True', 'null': 'True', 'to': "orm['manuscript.Page']"}),
            'editor': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_volumes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'original_publication_title': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['manuscript.Title']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'pages': ('django.db.models.fields.IntegerField', [], {}),
            'place_of_publication': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'publication_year': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
            'publisher': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '70', 'blank': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'title_page': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'title_page_of'", 'unique': 'True', 'null': 'True', 'to': "orm['manuscript.Page']"}),
            'volume': ('django.db.models.fields.IntegerField', [], {})
        }
    }

    complete_apps = ['manuscript']
| 64.253846 | 234 | 0.600024 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column('manuscript_title', 'num_volumes', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
db.add_column('manuscript_title', 'editor', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
db.add_column('manuscript_title', 'publisher', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
db.add_column('manuscript_title', 'place_of_publication', self.gf('django.db.models.fields.CharField')(default='', max_length=70, blank=True), keep_default=False)
db.add_column('manuscript_title', 'title_page', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='title_page_of', unique=True, null=True, to=orm['manuscript.Page']), keep_default=False)
db.add_column('manuscript_title', 'copyright_page', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='copyright_page_of', unique=True, null=True, to=orm['manuscript.Page']), keep_default=False)
db.add_column('manuscript_title', 'original_publication_title', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['manuscript.Title'], unique=True, null=True, blank=True), keep_default=False)
def backwards(self, orm):
db.delete_column('manuscript_title', 'num_volumes')
db.delete_column('manuscript_title', 'editor')
db.delete_column('manuscript_title', 'publisher')
db.delete_column('manuscript_title', 'place_of_publication')
db.delete_column('manuscript_title', 'title_page_id')
db.delete_column('manuscript_title', 'copyright_page_id')
db.delete_column('manuscript_title', 'original_publication_title_id')
models = {
'manuscript.author': {
'Meta': {'object_name': 'Author'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'manuscript.chapter': {
'Meta': {'object_name': 'Chapter'},
'heading': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '70', 'blank': 'True'}),
'start_page_no': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Title']"}),
'xml_chapter_id': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'})
},
'manuscript.compositeparagraph': {
'Meta': {'object_name': 'CompositeParagraph'},
'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Chapter']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'pages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['manuscript.Page']", 'symmetrical': 'False'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'manuscript.page': {
'Meta': {'unique_together': "(('title', 'number'),)", 'object_name': 'Page'},
'display': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'scan': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Title']"})
},
'manuscript.paragraph': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Paragraph'},
'_order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'chapter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Chapter']"}),
'composite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.CompositeParagraph']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {}),
'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'old_page_number': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Page']"}),
'split': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'manuscript.sitecopytext': {
'Meta': {'object_name': 'SiteCopyText'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'value': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'manuscript.title': {
'Meta': {'object_name': 'Title'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['manuscript.Author']"}),
'copyright_page': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'copyright_page_of'", 'unique': 'True', 'null': 'True', 'to': "orm['manuscript.Page']"}),
'editor': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_volumes': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'old_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'original_publication_title': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['manuscript.Title']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {}),
'place_of_publication': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'publication_year': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'publisher': ('django.db.models.fields.CharField', [], {'max_length': '70', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '70', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'title_page': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'title_page_of'", 'unique': 'True', 'null': 'True', 'to': "orm['manuscript.Page']"}),
'volume': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['manuscript']
| true | true |
f731236ba73b3b9e12b15dec7926dc5c3b69bd93 | 1,359 | py | Python | jpype/JavaLib.py | Hi-Fi/robotframework-after-jython | 1c4956f49fa28bad1156d8243456f8f102eeb8cb | [
"MIT"
] | null | null | null | jpype/JavaLib.py | Hi-Fi/robotframework-after-jython | 1c4956f49fa28bad1156d8243456f8f102eeb8cb | [
"MIT"
] | null | null | null | jpype/JavaLib.py | Hi-Fi/robotframework-after-jython | 1c4956f49fa28bad1156d8243456f8f102eeb8cb | [
"MIT"
] | null | null | null | """Main library."""
from typing import Optional
# Import module
import jpype
# Enable Java imports
import jpype.imports
# Pull in types
from jpype.types import *
import importlib
class JavaLib:
    """Robot Framework dynamic-library bridge to a Java keyword library via JPype.

    Starts a JVM (unless one is already running), instantiates the named
    Java library and forwards Robot Framework's dynamic-library API calls
    (``get_keyword_names``, ``run_keyword``, ``get_keyword_documentation``)
    to it.
    """
    # One shared library instance for the whole test run.
    ROBOT_LIBRARY_SCOPE = "GLOBAL"

    def __init__(
            self,
            library: str,
            classpath: Optional[str] = None):
        """Start the JVM if needed and instantiate the Java library.

        :param library: importable name of the Java library (made importable
            by ``jpype.imports``), e.g. ``org.example.MyLibrary``.
        :param classpath: optional colon-separated classpath for the JVM.
        """
        if jpype.isJVMStarted():
            print("JVM running")
        else:
            # Bug fix: the original called ``classpath.split(":")``
            # unconditionally, crashing when the default ``None`` was used.
            jvm_classpath = classpath.split(":") if classpath else []
            jpype.startJVM(classpath=jvm_classpath)
        JavaLibrary = importlib.import_module(library)
        self.javaLibrary = JavaLibrary()

    def get_keyword_names(self):
        """Return the Java library's keyword names as Python ``str`` objects."""
        # AnnotationLibrary returns Java's ArrayList of Java Strings --
        # convert each element so Robot Framework gets native strings.
        return [str(keyword) for keyword in self.javaLibrary.getKeywordNames()]

    def run_keyword(self, keyword: str, args, kwargs):
        """Execute *keyword* in the Java library with the given arguments."""
        import java
        return self.javaLibrary.runKeyword(
            JString(keyword), java.util.ArrayList(args), java.util.HashMap(kwargs))

    def get_keyword_documentation(self, keyword: str):
        """Return the keyword's documentation, or '' if none is available."""
        try:
            # AnnotationLibrary returns java.lang.String
            return str(self.javaLibrary.getKeywordDocumentation(keyword))
        except Exception:  # was a bare ``except:``; keep best-effort semantics
            return ""
| 27.18 | 114 | 0.656365 |
from typing import Optional
import jpype
import jpype.imports
from jpype.types import *
import importlib
class JavaLib:
ROBOT_LIBRARY_SCOPE = "GLOBAL"
def __init__(
self,
library: str,
classpath: Optional[str] = None):
if jpype.isJVMStarted():
print("JVM running")
else:
jpype.startJVM(classpath=classpath.split(":"))
JavaLibrary = importlib.import_module(library)
self.javaLibrary = JavaLibrary()
def get_keyword_names(self):
keywords = []
for keyword in self.javaLibrary.getKeywordNames():
keywords.append(str(keyword))
return keywords
def run_keyword(self, keyword: str, args, kwargs):
import java
return self.javaLibrary.runKeyword(JString(keyword), java.util.ArrayList(args), java.util.HashMap(kwargs))
def get_keyword_documentation(self, keyword: str):
try:
documentation = str(self.javaLibrary.getKeywordDocumentation(keyword))
except:
documentation = ""
return documentation
| true | true |
f7312400eaec87ffea3fdf898814764925b356f6 | 2,267 | py | Python | cogs/pin.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null | cogs/pin.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null | cogs/pin.py | kdrsrt/mecha | a32047fe6bdc776cad73d95a716c7bfa8f0516ed | [
"MIT"
] | null | null | null | from discord.ext.commands import Cog, command
from discord import Embed, File
from discord.ext import commands
import os, discord
class Pin(Cog):
    """Reaction-driven message pinning.

    Adding a 📌 reaction pins the message; removing the reaction unpins it.
    Authorization: on one specific guild the 'sabitleyici' role is required,
    elsewhere the member needs the channel-level manage-messages permission.
    """

    # Guild where the role-based check applies instead of the permission check.
    PINNER_GUILD_ID = 699224778824745003
    PINNER_ROLE = 'sabitleyici'

    def __init__(self, bot):
        self.bot = bot
        self.emoji = "📌"

    def _member_can_pin(self, guild_id, channel, member):
        """Return True if *member* is allowed to (un)pin messages in *channel*."""
        if int(guild_id) == self.PINNER_GUILD_ID:
            return any(role.name == self.PINNER_ROLE for role in member.roles)
        # Fix: the original indexed the permissions iterator at position 13
        # (``pers[13][1]``), which silently breaks if discord.py reorders its
        # permission flags; use the named attribute instead.
        return channel.permissions_for(member).manage_messages

    @Cog.listener()
    async def on_raw_reaction_add(self, payload):
        if payload.emoji.name != self.emoji:
            return
        channel = self.bot.get_channel(payload.channel_id)
        if not self._member_can_pin(payload.guild_id, channel, payload.member):
            return
        message = await channel.fetch_message(payload.message_id)
        if not message.pinned:
            await message.pin()
            # Delete the newest channel message -- presumably the "pinned a
            # message" system notification Discord just posted; confirm there
            # is no race with other messages arriving in between.
            async for notification in channel.history(limit=1):
                await notification.delete()

    @Cog.listener()
    async def on_raw_reaction_remove(self, payload):
        if payload.emoji.name != self.emoji:
            return
        channel = self.bot.get_channel(payload.channel_id)
        # Raw remove events carry no member object, so resolve it by user id.
        member = discord.utils.get(self.bot.get_all_members(), id=payload.user_id)
        if not self._member_can_pin(payload.guild_id, channel, member):
            return
        message = await channel.fetch_message(payload.message_id)
        if message.pinned:
            await message.unpin()
| 44.45098 | 87 | 0.539038 | from discord.ext.commands import Cog, command
from discord import Embed, File
from discord.ext import commands
import os, discord
class Pin(Cog):
def __init__(self, bot):
self.bot = bot
self.emoji = "📌"
@Cog.listener()
async def on_raw_reaction_add(self, payload):
if payload.emoji.name == self.emoji:
channel = self.bot.get_channel(payload.channel_id)
if int(payload.guild_id) == 699224778824745003:
member_roles = list(a.name for a in payload.member.roles)
if 'sabitleyici' in member_roles:
message = await channel.fetch_message(payload.message_id)
if not message.pinned:
await message.pin()
async for x in channel.history(limit = 1):
await x.delete()
else:
pers = list(a for a in channel.permissions_for(payload.member))
if pers[13][1] == True:
message = await channel.fetch_message(payload.message_id)
if not message.pinned:
await message.pin()
async for x in channel.history(limit = 1):
await x.delete()
@Cog.listener()
async def on_raw_reaction_remove(self, payload):
if payload.emoji.name == self.emoji:
channel = self.bot.get_channel(payload.channel_id)
member = discord.utils.get(self.bot.get_all_members(), id=payload.user_id)
if int(payload.guild_id) == 699224778824745003:
member_roles = list(a.name for a in member.roles)
print(member_roles)
if 'sabitleyici' in member_roles:
message = await channel.fetch_message(payload.message_id)
if message.pinned:
await message.unpin()
else:
pers = list(a for a in channel.permissions_for(member))
if pers[13][1] == True:
message = await channel.fetch_message(payload.message_id)
if message.pinned:
await message.unpin()
| true | true |
f731245784b2ec4ad02c78b9de42af4e227ee2c1 | 900 | py | Python | test/test_dist.py | bachew/mollusc | 9ae0eff4455b55314c2b3fe153c51403e2affa1c | [
"MIT"
] | null | null | null | test/test_dist.py | bachew/mollusc | 9ae0eff4455b55314c2b3fe153c51403e2affa1c | [
"MIT"
] | 3 | 2017-11-20T06:46:47.000Z | 2019-12-06T07:45:59.000Z | test/test_dist.py | bachew/mollusc | 9ae0eff4455b55314c2b3fe153c51403e2affa1c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from mollusc.dist import Twine
class TestTwine(object):
    """Unit tests for the argv built by ``mollusc.dist.Twine.get_command``."""

    def test_register_command(self):
        """`register` argv: repo URL and username included, password omitted."""
        twine = Twine(username='registrar', password='reg1strar')
        # Note: the expected argv contains the username but no password flag.
        assert twine.get_command('register', 'package.whl', {'-c': 'test register'}) == [
            'twine',
            'register',
            '--repository-url', Twine.DEFAULT_REPO_URL,
            '-u', 'registrar',
            '-c', 'test register',
            'package.whl'
        ]

    def test_upload_command(self):
        """`upload` argv: multiple distribution files are appended in order."""
        twine = Twine(username='uploader', password='upl0ader')
        assert twine.get_command('upload', ['package.whl', 'package.tar.gz'], {'-c': 'test upload'}) == [
            'twine',
            'upload',
            '--repository-url', Twine.DEFAULT_REPO_URL,
            '-u', 'uploader',
            '-c', 'test upload',
            'package.whl', 'package.tar.gz'
        ]
| 33.333333 | 105 | 0.53 |
from mollusc.dist import Twine
class TestTwine(object):
def test_register_command(self):
twine = Twine(username='registrar', password='reg1strar')
assert twine.get_command('register', 'package.whl', {'-c': 'test register'}) == [
'twine',
'register',
'--repository-url', Twine.DEFAULT_REPO_URL,
'-u', 'registrar',
'-c', 'test register',
'package.whl'
]
def test_upload_command(self):
twine = Twine(username='uploader', password='upl0ader')
assert twine.get_command('upload', ['package.whl', 'package.tar.gz'], {'-c': 'test upload'}) == [
'twine',
'upload',
'--repository-url', Twine.DEFAULT_REPO_URL,
'-u', 'uploader',
'-c', 'test upload',
'package.whl', 'package.tar.gz'
]
| true | true |
f731257080db5facc50d1cd0d6a59693883c2335 | 107 | py | Python | slack/forms.py | pandabearcoder/pythonph | f0a1b93cd3f6234f1eb2d8eae83a8ad8b6741006 | [
"MIT"
] | 23 | 2015-02-26T04:01:02.000Z | 2021-11-09T01:48:09.000Z | slack/forms.py | pandabearcoder/pythonph | f0a1b93cd3f6234f1eb2d8eae83a8ad8b6741006 | [
"MIT"
] | 32 | 2015-04-27T14:17:16.000Z | 2022-03-11T23:12:03.000Z | slack/forms.py | pandabearcoder/pythonph | f0a1b93cd3f6234f1eb2d8eae83a8ad8b6741006 | [
"MIT"
] | 27 | 2015-02-16T17:00:18.000Z | 2022-03-29T01:01:46.000Z | from django import forms
class SlackInviteForm(forms.Form):
    """Form collecting the email address to send a Slack invite to."""
    email = forms.EmailField(label="Email")
| 15.285714 | 43 | 0.747664 | from django import forms
class SlackInviteForm(forms.Form):
email = forms.EmailField(label="Email")
| true | true |
f73126514631ade1b408efeff6b45a1afa8ead8e | 124 | py | Python | src/opencmiss/__init__.py | rchristie/opencmiss.argon | c5cf8f313e31fc2f9d647a64ce8694cbb4f9e9cf | [
"Apache-2.0"
] | null | null | null | src/opencmiss/__init__.py | rchristie/opencmiss.argon | c5cf8f313e31fc2f9d647a64ce8694cbb4f9e9cf | [
"Apache-2.0"
] | 2 | 2016-01-15T04:17:35.000Z | 2016-02-26T04:01:02.000Z | src/opencmiss/__init__.py | rchristie/opencmiss.argon | c5cf8f313e31fc2f9d647a64ce8694cbb4f9e9cf | [
"Apache-2.0"
] | 6 | 2015-11-29T20:57:16.000Z | 2021-06-08T04:02:26.000Z |
# OpenCMISS Python package initialisation file.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| 24.8 | 47 | 0.830645 |
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| true | true |
f73126570233a0b7d7357c728250600918f9ec85 | 126 | py | Python | mscreen/autodocktools_prepare_py3k/bhtree/__init__.py | e-mayo/mscreen | a50f0b2f7104007c730baa51b4ec65c891008c47 | [
"MIT"
] | 9 | 2021-03-06T04:24:28.000Z | 2022-01-03T09:53:07.000Z | bhtree/__init__.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 3 | 2021-03-07T05:37:16.000Z | 2021-09-19T15:06:54.000Z | bhtree/__init__.py | e-mayo/autodocktools-prepare-py3k | 2dd2316837bcb7c19384294443b2855e5ccd3e01 | [
"BSD-3-Clause"
] | 4 | 2019-08-28T23:11:39.000Z | 2021-11-27T08:43:36.000Z | from .bhtreelib import *
__MGLTOOLSVersion__ = '1-4alpha3'
CRITICAL_DEPENDENCIES = ['mglutil']
NONCRITICAL_DEPENDENCIES = []
| 21 | 35 | 0.777778 | from .bhtreelib import *
__MGLTOOLSVersion__ = '1-4alpha3'
CRITICAL_DEPENDENCIES = ['mglutil']
NONCRITICAL_DEPENDENCIES = []
| true | true |
f73126e4c01e01f20bce82c5853d0aa6eb16c032 | 656 | py | Python | app/utils/prediction_utils.py | Ukasz09/Clothing-recognition | 9332b0d3eac59782c0e8a72078ba97d67805d512 | [
"MIT"
] | 3 | 2020-06-11T12:38:28.000Z | 2020-11-01T13:26:47.000Z | app/utils/prediction_utils.py | Ukasz09/Clothing-recognition | 9332b0d3eac59782c0e8a72078ba97d67805d512 | [
"MIT"
] | null | null | null | app/utils/prediction_utils.py | Ukasz09/Clothing-recognition | 9332b0d3eac59782c0e8a72078ba97d67805d512 | [
"MIT"
] | null | null | null | import time
import datetime
import numpy as np
# Module-load timestamps. Nothing else in this module reads them back, so
# they appear to be leftovers from removed timing code -- TODO confirm
# before relying on or deleting them.
__start_time = time.time()
__end_time = time.time()
def calc_accuracy(predicted_labels, real_labels):
    """Return the percentage (0-100) of positions where both label
    sequences agree; the two sequences are compared element-wise."""
    matches = sum(
        1 for predicted, real in zip(predicted_labels, real_labels)
        if predicted == real)
    return matches * 100 / len(predicted_labels)
def predict_labels(pyx):
    """
    :param pyx: matrix with probability distribution p(y|x) for every class and *X_test* object
    :return: list with predicted class labels (index of the max entry per row)
    """
    labels = []
    for row in pyx:
        labels.append(np.argmax(row, axis=0))
    return labels
def convert_time(sec):
    """Format a duration given in seconds as an ``H:MM:SS``-style string."""
    duration = datetime.timedelta(seconds=sec)
    return str(duration)
| 24.296296 | 95 | 0.696646 | import time
import datetime
import numpy as np
__start_time = time.time()
__end_time = time.time()
def calc_accuracy(predicted_labels, real_labels):
correct_qty = 0
for i in range(len(predicted_labels)):
if predicted_labels[i] == real_labels[i]:
correct_qty += 1
return correct_qty * 100 / len(predicted_labels)
def predict_labels(pyx):
return [np.argmax(row, axis=0) for row in pyx]
def convert_time(sec):
return str(datetime.timedelta(seconds=sec))
| true | true |
f73127413286956e4030d84fb610da04ec29aad6 | 8,529 | py | Python | saleor/discount/models.py | dnordio/saleor | 323963748e6a2702265ec6635b930a234abde4f5 | [
"BSD-3-Clause"
] | 1 | 2019-05-02T17:24:05.000Z | 2019-05-02T17:24:05.000Z | saleor/discount/models.py | valentine217/saleor | 323963748e6a2702265ec6635b930a234abde4f5 | [
"BSD-3-Clause"
] | null | null | null | saleor/discount/models.py | valentine217/saleor | 323963748e6a2702265ec6635b930a234abde4f5 | [
"BSD-3-Clause"
] | 1 | 2019-05-23T07:30:50.000Z | 2019-05-23T07:30:50.000Z | from datetime import date
from decimal import Decimal
from functools import partial
from django.conf import settings
from django.db import models
from django.db.models import F, Q
from django.utils.translation import pgettext, pgettext_lazy
from django_countries.fields import CountryField
from django_prices.models import MoneyField
from django_prices.templatetags.prices_i18n import amount
from prices import Money, fixed_discount, percentage_discount
from ..core.utils.translations import TranslationProxy
from . import DiscountValueType, VoucherType
class NotApplicable(ValueError):
    """Raised when a voucher discount cannot be applied to a checkout.

    Typically the order value falls short of the voucher's required
    minimum; when that threshold exists it is exposed on the exception
    as the ``min_amount_spent`` attribute.
    """

    def __init__(self, msg, min_amount_spent=None):
        self.min_amount_spent = min_amount_spent
        super().__init__(msg)
class VoucherQueryset(models.QuerySet):
    """Queryset helpers for filtering vouchers by validity on a given date."""

    def active(self, date):
        # Usable on *date*: already started, end date not passed (if any),
        # and usage limit not exhausted (if one is set).
        return self.filter(
            Q(usage_limit__isnull=True) | Q(used__lt=F('usage_limit')),
            Q(end_date__isnull=True) | Q(end_date__gte=date),
            start_date__lte=date)

    def expired(self, date):
        # Expired on *date*: uses exhausted or end date passed, for vouchers
        # that had already started strictly before *date*.
        return self.filter(
            Q(used__gte=F('usage_limit')) | Q(end_date__lt=date),
            start_date__lt=date)
class Voucher(models.Model):
    """Discount voucher redeemable by code at checkout.

    The voucher ``type`` decides what the discount applies to (order value,
    shipping, specific products/collections/categories); ``discount_value``
    plus ``discount_value_type`` define the discount itself.
    """
    type = models.CharField(
        max_length=20, choices=VoucherType.CHOICES, default=VoucherType.VALUE)
    name = models.CharField(max_length=255, null=True, blank=True)
    # Redemption code entered by the customer; unique and indexed for lookup.
    code = models.CharField(max_length=12, unique=True, db_index=True)
    # Max number of redemptions (None = unlimited); `used` counts redemptions.
    usage_limit = models.PositiveIntegerField(null=True, blank=True)
    used = models.PositiveIntegerField(default=0, editable=False)
    start_date = models.DateField(default=date.today)
    end_date = models.DateField(null=True, blank=True)
    # this field indicates if discount should be applied per order or
    # individually to every item
    apply_once_per_order = models.BooleanField(default=False)
    # FIXED (absolute amount) or PERCENTAGE; interpreted by get_discount().
    discount_value_type = models.CharField(
        max_length=10, choices=DiscountValueType.CHOICES,
        default=DiscountValueType.FIXED)
    discount_value = models.DecimalField(
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES)
    # not mandatory fields, usage depends on type
    countries = CountryField(multiple=True, blank=True)
    min_amount_spent = MoneyField(
        currency=settings.DEFAULT_CURRENCY,
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES, null=True, blank=True)
    products = models.ManyToManyField('product.Product', blank=True)
    collections = models.ManyToManyField('product.Collection', blank=True)
    categories = models.ManyToManyField('product.Category', blank=True)
    objects = VoucherQueryset.as_manager()
    translated = TranslationProxy()
def __str__(self):
if self.name:
return self.name
discount = '%s %s' % (
self.discount_value, self.get_discount_value_type_display())
if self.type == VoucherType.SHIPPING:
if self.is_free:
return pgettext('Voucher type', 'Free shipping')
return pgettext(
'Voucher type',
'%(discount)s off shipping') % {'discount': discount}
if self.type == VoucherType.PRODUCT:
products = len(self.products.all())
if products:
return pgettext(
'Voucher type',
'%(discount)s off %(product_num)d products') % {
'discount': discount,
'product_num': products}
if self.type == VoucherType.COLLECTION:
collections = len(self.collections.all())
if collections:
return pgettext(
'Voucher type',
'%(discount)s off %(collections_num)d collections') % {
'discount': discount,
'collections_num': collections}
if self.type == VoucherType.CATEGORY:
categories = len(self.categories.all())
if categories:
return pgettext(
'Voucher type',
'%(discount)s off %(categories_num)d categories') % {
'discount': discount,
'categories_num': categories}
return pgettext(
'Voucher type', '%(discount)s off') % {'discount': discount}
@property
def is_free(self):
return (
self.discount_value == Decimal(100) and
self.discount_value_type == DiscountValueType.PERCENTAGE)
def get_discount(self):
if self.discount_value_type == DiscountValueType.FIXED:
discount_amount = Money(
self.discount_value, settings.DEFAULT_CURRENCY)
return partial(fixed_discount, discount=discount_amount)
if self.discount_value_type == DiscountValueType.PERCENTAGE:
return partial(percentage_discount, percentage=self.discount_value)
raise NotImplementedError('Unknown discount type')
def get_discount_amount_for(self, price):
discount = self.get_discount()
gross_price = price.gross
gross_after_discount = discount(gross_price)
if gross_after_discount.amount < 0:
return gross_price
return gross_price - gross_after_discount
def validate_min_amount_spent(self, value):
min_amount_spent = self.min_amount_spent
if min_amount_spent and value.gross < min_amount_spent:
msg = pgettext(
'Voucher not applicable',
'This offer is only valid for orders over %(amount)s.')
raise NotApplicable(
msg % {'amount': amount(min_amount_spent)},
min_amount_spent=min_amount_spent)
class SaleQueryset(models.QuerySet):
def active(self, date):
return self.filter(
Q(end_date__isnull=True) | Q(end_date__gte=date),
start_date__lte=date)
def expired(self, date):
return self.filter(
end_date__lt=date, start_date__lt=date)
class VoucherTranslation(models.Model):
language_code = models.CharField(max_length=10)
name = models.CharField(max_length=255, null=True, blank=True)
voucher = models.ForeignKey(
Voucher, related_name='translations', on_delete=models.CASCADE)
class Meta:
unique_together = (('language_code', 'voucher'),)
class Sale(models.Model):
name = models.CharField(max_length=255)
type = models.CharField(
max_length=10, choices=DiscountValueType.CHOICES,
default=DiscountValueType.FIXED)
value = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
default=0)
products = models.ManyToManyField('product.Product', blank=True)
categories = models.ManyToManyField('product.Category', blank=True)
collections = models.ManyToManyField('product.Collection', blank=True)
start_date = models.DateField(default=date.today)
end_date = models.DateField(null=True, blank=True)
objects = SaleQueryset.as_manager()
translated = TranslationProxy()
class Meta:
app_label = 'discount'
permissions = ((
'manage_discounts', pgettext_lazy(
'Permission description', 'Manage sales and vouchers.')),)
def __repr__(self):
return 'Sale(name=%r, value=%r, type=%s)' % (
str(self.name), self.value, self.get_type_display())
def __str__(self):
return self.name
def get_discount(self):
if self.type == DiscountValueType.FIXED:
discount_amount = Money(self.value, settings.DEFAULT_CURRENCY)
return partial(fixed_discount, discount=discount_amount)
if self.type == DiscountValueType.PERCENTAGE:
return partial(percentage_discount, percentage=self.value)
raise NotImplementedError('Unknown discount type')
class SaleTranslation(models.Model):
language_code = models.CharField(max_length=10)
name = models.CharField(max_length=255, null=True, blank=True)
sale = models.ForeignKey(
Sale, related_name='translations', on_delete=models.CASCADE)
class Meta:
unique_together = (('language_code', 'sale'),)
| 39.486111 | 79 | 0.663735 | from datetime import date
from decimal import Decimal
from functools import partial
from django.conf import settings
from django.db import models
from django.db.models import F, Q
from django.utils.translation import pgettext, pgettext_lazy
from django_countries.fields import CountryField
from django_prices.models import MoneyField
from django_prices.templatetags.prices_i18n import amount
from prices import Money, fixed_discount, percentage_discount
from ..core.utils.translations import TranslationProxy
from . import DiscountValueType, VoucherType
class NotApplicable(ValueError):
def __init__(self, msg, min_amount_spent=None):
super().__init__(msg)
self.min_amount_spent = min_amount_spent
class VoucherQueryset(models.QuerySet):
def active(self, date):
return self.filter(
Q(usage_limit__isnull=True) | Q(used__lt=F('usage_limit')),
Q(end_date__isnull=True) | Q(end_date__gte=date),
start_date__lte=date)
def expired(self, date):
return self.filter(
Q(used__gte=F('usage_limit')) | Q(end_date__lt=date),
start_date__lt=date)
class Voucher(models.Model):
type = models.CharField(
max_length=20, choices=VoucherType.CHOICES, default=VoucherType.VALUE)
name = models.CharField(max_length=255, null=True, blank=True)
code = models.CharField(max_length=12, unique=True, db_index=True)
usage_limit = models.PositiveIntegerField(null=True, blank=True)
used = models.PositiveIntegerField(default=0, editable=False)
start_date = models.DateField(default=date.today)
end_date = models.DateField(null=True, blank=True)
apply_once_per_order = models.BooleanField(default=False)
discount_value_type = models.CharField(
max_length=10, choices=DiscountValueType.CHOICES,
default=DiscountValueType.FIXED)
discount_value = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES)
countries = CountryField(multiple=True, blank=True)
min_amount_spent = MoneyField(
currency=settings.DEFAULT_CURRENCY,
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES, null=True, blank=True)
products = models.ManyToManyField('product.Product', blank=True)
collections = models.ManyToManyField('product.Collection', blank=True)
categories = models.ManyToManyField('product.Category', blank=True)
objects = VoucherQueryset.as_manager()
translated = TranslationProxy()
def __str__(self):
if self.name:
return self.name
discount = '%s %s' % (
self.discount_value, self.get_discount_value_type_display())
if self.type == VoucherType.SHIPPING:
if self.is_free:
return pgettext('Voucher type', 'Free shipping')
return pgettext(
'Voucher type',
'%(discount)s off shipping') % {'discount': discount}
if self.type == VoucherType.PRODUCT:
products = len(self.products.all())
if products:
return pgettext(
'Voucher type',
'%(discount)s off %(product_num)d products') % {
'discount': discount,
'product_num': products}
if self.type == VoucherType.COLLECTION:
collections = len(self.collections.all())
if collections:
return pgettext(
'Voucher type',
'%(discount)s off %(collections_num)d collections') % {
'discount': discount,
'collections_num': collections}
if self.type == VoucherType.CATEGORY:
categories = len(self.categories.all())
if categories:
return pgettext(
'Voucher type',
'%(discount)s off %(categories_num)d categories') % {
'discount': discount,
'categories_num': categories}
return pgettext(
'Voucher type', '%(discount)s off') % {'discount': discount}
@property
def is_free(self):
return (
self.discount_value == Decimal(100) and
self.discount_value_type == DiscountValueType.PERCENTAGE)
def get_discount(self):
if self.discount_value_type == DiscountValueType.FIXED:
discount_amount = Money(
self.discount_value, settings.DEFAULT_CURRENCY)
return partial(fixed_discount, discount=discount_amount)
if self.discount_value_type == DiscountValueType.PERCENTAGE:
return partial(percentage_discount, percentage=self.discount_value)
raise NotImplementedError('Unknown discount type')
def get_discount_amount_for(self, price):
discount = self.get_discount()
gross_price = price.gross
gross_after_discount = discount(gross_price)
if gross_after_discount.amount < 0:
return gross_price
return gross_price - gross_after_discount
def validate_min_amount_spent(self, value):
min_amount_spent = self.min_amount_spent
if min_amount_spent and value.gross < min_amount_spent:
msg = pgettext(
'Voucher not applicable',
'This offer is only valid for orders over %(amount)s.')
raise NotApplicable(
msg % {'amount': amount(min_amount_spent)},
min_amount_spent=min_amount_spent)
class SaleQueryset(models.QuerySet):
def active(self, date):
return self.filter(
Q(end_date__isnull=True) | Q(end_date__gte=date),
start_date__lte=date)
def expired(self, date):
return self.filter(
end_date__lt=date, start_date__lt=date)
class VoucherTranslation(models.Model):
language_code = models.CharField(max_length=10)
name = models.CharField(max_length=255, null=True, blank=True)
voucher = models.ForeignKey(
Voucher, related_name='translations', on_delete=models.CASCADE)
class Meta:
unique_together = (('language_code', 'voucher'),)
class Sale(models.Model):
name = models.CharField(max_length=255)
type = models.CharField(
max_length=10, choices=DiscountValueType.CHOICES,
default=DiscountValueType.FIXED)
value = models.DecimalField(
max_digits=settings.DEFAULT_MAX_DIGITS,
decimal_places=settings.DEFAULT_DECIMAL_PLACES,
default=0)
products = models.ManyToManyField('product.Product', blank=True)
categories = models.ManyToManyField('product.Category', blank=True)
collections = models.ManyToManyField('product.Collection', blank=True)
start_date = models.DateField(default=date.today)
end_date = models.DateField(null=True, blank=True)
objects = SaleQueryset.as_manager()
translated = TranslationProxy()
class Meta:
app_label = 'discount'
permissions = ((
'manage_discounts', pgettext_lazy(
'Permission description', 'Manage sales and vouchers.')),)
def __repr__(self):
return 'Sale(name=%r, value=%r, type=%s)' % (
str(self.name), self.value, self.get_type_display())
def __str__(self):
return self.name
def get_discount(self):
if self.type == DiscountValueType.FIXED:
discount_amount = Money(self.value, settings.DEFAULT_CURRENCY)
return partial(fixed_discount, discount=discount_amount)
if self.type == DiscountValueType.PERCENTAGE:
return partial(percentage_discount, percentage=self.value)
raise NotImplementedError('Unknown discount type')
class SaleTranslation(models.Model):
language_code = models.CharField(max_length=10)
name = models.CharField(max_length=255, null=True, blank=True)
sale = models.ForeignKey(
Sale, related_name='translations', on_delete=models.CASCADE)
class Meta:
unique_together = (('language_code', 'sale'),)
| true | true |
f7312755a82193a6713ea6ec20053148ab734cac | 1,585 | py | Python | migrations/versions/f0793141fd6b_added_notifications.py | dyachoksa/flask-microblog | f956ba0199ab3fd4226806a9c1e0b3b38092c3d4 | [
"MIT"
] | null | null | null | migrations/versions/f0793141fd6b_added_notifications.py | dyachoksa/flask-microblog | f956ba0199ab3fd4226806a9c1e0b3b38092c3d4 | [
"MIT"
] | null | null | null | migrations/versions/f0793141fd6b_added_notifications.py | dyachoksa/flask-microblog | f956ba0199ab3fd4226806a9c1e0b3b38092c3d4 | [
"MIT"
] | null | null | null | """Added notifications
Revision ID: f0793141fd6b
Revises: 9ecc68fdc92d
Create Date: 2020-05-02 17:17:31.252794
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "f0793141fd6b"
down_revision = "9ecc68fdc92d"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"notification",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.String(length=128), nullable=True),
sa.Column("user_id", sa.Integer(), nullable=True),
sa.Column("timestamp", sa.Float(), nullable=True),
sa.Column("payload_json", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(["user_id"], ["user.id"],),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_notification_name"), "notification", ["name"], unique=False
)
op.create_index(
op.f("ix_notification_timestamp"), "notification", ["timestamp"], unique=False
)
op.create_index(
op.f("ix_notification_user_id"), "notification", ["user_id"], unique=False
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_notification_user_id"), table_name="notification")
op.drop_index(op.f("ix_notification_timestamp"), table_name="notification")
op.drop_index(op.f("ix_notification_name"), table_name="notification")
op.drop_table("notification")
# ### end Alembic commands ###
| 31.7 | 86 | 0.666877 | from alembic import op
import sqlalchemy as sa
revision = "f0793141fd6b"
down_revision = "9ecc68fdc92d"
branch_labels = None
depends_on = None
def upgrade():
umn("user_id", sa.Integer(), nullable=True),
sa.Column("timestamp", sa.Float(), nullable=True),
sa.Column("payload_json", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(["user_id"], ["user.id"],),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_notification_name"), "notification", ["name"], unique=False
)
op.create_index(
op.f("ix_notification_timestamp"), "notification", ["timestamp"], unique=False
)
op.create_index(
op.f("ix_notification_user_id"), "notification", ["user_id"], unique=False
)
op_table("notification")
| true | true |
f73127aea2857de2faadcfe865b063892f3787a7 | 1,823 | py | Python | elfsample.py | TheMindVirus/pico-uf22elf | ee5d95208851e6eba4b21675cae66fdf07176d0e | [
"MIT"
] | null | null | null | elfsample.py | TheMindVirus/pico-uf22elf | ee5d95208851e6eba4b21675cae66fdf07176d0e | [
"MIT"
] | null | null | null | elfsample.py | TheMindVirus/pico-uf22elf | ee5d95208851e6eba4b21675cae66fdf07176d0e | [
"MIT"
] | null | null | null | data = b""
data += b"\x7F\x45\x4C\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x02\x00\x28\x00\x01\x00\x00\x00\x60\x00\x00\x00\x40\x00\x00\x00"
data += b"\xB0\x00\x00\x00\x00\x00\x00\x00\x34\x00\x20\x00\x01\x00\x28\x00"
data += b"\x04\x00\x03\x00"
data += b"\x00" * (0x40 - len(data))
data += b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x08"
data += b"\x90\x00\x00\x00\x90\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00" * (0x60 - len(data))
data += b"\x0D\x20\xA0\xE3\x14\x10\x8F\xE2\x01\x00\xA0\xE3\x04\x70\xA0\xE3"
data += b"\x00\x00\x00\xEF\x01\x00\xA0\xE3\x01\x70\xA0\xE3\x00\x00\x00\xEF"
data += b"\x00" * (0x80 - len(data))
data += b"\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x21\x0A\x00" # Hello World!
data += b"\x00" * (0x90 - len(data))
data += b"\x00\x2E\x73\x68\x73\x74\x72\x74\x61\x62\x00\x2E\x74\x65\x78\x74" #.shstrtab
data += b"\x00\x2E\x72\x6F\x64\x61\x74\x61\x00"
data += b"\x00" * (0xB0 - len(data))
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x0B\x00\x00\x00\x01\x00\x00\x00"
data += b"\x06\x00\x00\x00\x60\x00\x00\x08\x60\x00\x00\x00\x20\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x11\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x80\x00\x00\x08"
data += b"\x00\x00\x00\x00\x0D\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x90\x00\x00\x00\x19\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
file = open("sample.elf", "wb")
file.write(data)
file.close()
print("Done!")
| 41.431818 | 86 | 0.668129 | data = b""
data += b"\x7F\x45\x4C\x46\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x02\x00\x28\x00\x01\x00\x00\x00\x60\x00\x00\x00\x40\x00\x00\x00"
data += b"\xB0\x00\x00\x00\x00\x00\x00\x00\x34\x00\x20\x00\x01\x00\x28\x00"
data += b"\x04\x00\x03\x00"
data += b"\x00" * (0x40 - len(data))
data += b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x08"
data += b"\x90\x00\x00\x00\x90\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00" * (0x60 - len(data))
data += b"\x0D\x20\xA0\xE3\x14\x10\x8F\xE2\x01\x00\xA0\xE3\x04\x70\xA0\xE3"
data += b"\x00\x00\x00\xEF\x01\x00\xA0\xE3\x01\x70\xA0\xE3\x00\x00\x00\xEF"
data += b"\x00" * (0x80 - len(data))
data += b"\x48\x65\x6C\x6C\x6F\x20\x57\x6F\x72\x6C\x64\x21\x0A\x00"
data += b"\x00" * (0x90 - len(data))
data += b"\x00\x2E\x73\x68\x73\x74\x72\x74\x61\x62\x00\x2E\x74\x65\x78\x74"
data += b"\x00\x2E\x72\x6F\x64\x61\x74\x61\x00"
data += b"\x00" * (0xB0 - len(data))
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x0B\x00\x00\x00\x01\x00\x00\x00"
data += b"\x06\x00\x00\x00\x60\x00\x00\x08\x60\x00\x00\x00\x20\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x11\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x80\x00\x00\x08"
data += b"\x00\x00\x00\x00\x0D\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x90\x00\x00\x00\x19\x00\x00\x00"
data += b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
file = open("sample.elf", "wb")
file.write(data)
file.close()
print("Done!")
| true | true |
f731283f2aec8441075c7506a401da140cdbead6 | 20,464 | py | Python | test/functional/test_framework/test_framework.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_framework.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | test/functional/test_framework/test_framework.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a bitcoinflex test script.
Individual bitcoinflex test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinflexds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinflexds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing bitcoinflexd/bitcoinflex-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: bitcoinflexds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoinflexd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinflexds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a bitcoinflexd test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple bitcoinflexd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'bitcoinflexd exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "bitcoinflexd should have exited with an error"
else:
assert_msg = "bitcoinflexd should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backwared compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoinflexd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
    def _initialize_chain(self):
        """Initialize a pre-mined blockchain for use by the test.

        Create (or reuse) a cache of a 200-block-long chain (with wallet)
        for MAX_NODES, then copy the first num_nodes cached datadirs into
        the test's tmpdir.
        """
        assert self.num_nodes <= MAX_NODES
        # Rebuild the whole cache if any node's cached datadir is missing.
        create_cache = False
        for i in range(MAX_NODES):
            if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                create_cache = True
                break
        if create_cache:
            self.log.debug("Creating data directories from cached datadir")
            # find and delete old cache directories if any exist
            for i in range(MAX_NODES):
                if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
                    shutil.rmtree(get_datadir_path(self.options.cachedir, i))
            # Create cache directories, run bitcoinds:
            for i in range(MAX_NODES):
                datadir = initialize_datadir(self.options.cachedir, i)
                args = [os.getenv("BITCOIND", "bitcoinflexd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
                if i > 0:
                    # Later nodes connect to node 0 so the chain propagates.
                    args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
                self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
                self.nodes[i].args = args
                self.start_node(i)
            # Wait for RPC connections to be ready
            for node in self.nodes:
                node.wait_for_rpc_connection()
            # Create a 200-block-long chain; each of the 4 first nodes
            # gets 25 mature blocks and 25 immature.
            # Note: To preserve compatibility with older versions of
            # initialize_chain, only 4 nodes will generate coins.
            #
            # NOTE(review): older comments say blocks are "10 minutes apart",
            # but block_time below advances by 60 seconds per block and the
            # start offset is 201 minutes in the past -- confirm intent.
            self.enable_mocktime()
            block_time = self.mocktime - (201 * 60)
            for i in range(2):
                for peer in range(4):
                    for j in range(25):
                        set_node_times(self.nodes, block_time)
                        self.nodes[peer].generate(1)
                        block_time += 60
                    # Must sync before next peer starts generating blocks
                    sync_blocks(self.nodes)
            # Shut them down, and clean up cache directories:
            self.stop_nodes()
            self.nodes = []
            self.disable_mocktime()
            # Helper: path inside node n's cached regtest datadir.
            def cache_path(n, *paths):
                return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
            # Strip everything except wallet/chain state from the cache.
            for i in range(MAX_NODES):
                for entry in os.listdir(cache_path(i)):
                    if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
                        os.remove(cache_path(i, entry))
        # Copy cached datadirs into the test's tmpdir.
        for i in range(self.num_nodes):
            from_dir = get_datadir_path(self.options.cachedir, i)
            to_dir = get_datadir_path(self.options.tmpdir, i)
            shutil.copytree(from_dir, to_dir)
            initialize_datadir(self.options.tmpdir, i)  # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
    """Test framework for doing p2p comparison testing.

    Sets up some bitcoinflexd binaries:
    - 1 binary: test binary
    - 2 binaries: 1 test binary, 1 ref binary
    - n>2 binaries: 1 test binary, n-1 ref binaries
    """

    def set_test_params(self):
        """Two nodes, starting from a clean (un-mined) chain."""
        self.num_nodes = 2
        self.setup_clean_chain = True

    def add_options(self, parser):
        """Register options for choosing the binaries under comparison."""
        parser.add_option("--testbinary", dest="testbinary",
                          default=os.getenv("BITCOIND", "bitcoinflexd"),
                          help="bitcoinflexd binary to test")
        parser.add_option("--refbinary", dest="refbinary",
                          default=os.getenv("BITCOIND", "bitcoinflexd"),
                          help="bitcoinflexd binary to use for reference nodes (if any)")

    def setup_network(self):
        """Start one test node plus num_nodes-1 reference nodes."""
        # A subclass may override extra_args; by default whitelist localhost
        # on every node so blocks relay without delay.
        extra_args = getattr(self, "extra_args",
                             [['-whitelist=127.0.0.1']] * self.num_nodes)
        binaries = [self.options.testbinary] + \
                   [self.options.refbinary] * (self.num_nodes - 1)
        self.add_nodes(self.num_nodes, extra_args, binary=binaries)
        self.start_nodes()
class SkipTest(Exception):
    """This exception is raised to skip a test.

    main() catches it, logs the message, and exits with the "skipped"
    status instead of failing the run.
    """
    def __init__(self, message):
        # Forward the message to Exception so str(e)/repr(e) are informative
        # (previously str(e) was empty because super().__init__ was skipped).
        super().__init__(message)
        self.message = message
| 42.020534 | 310 | 0.622215 |
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
    """Outcome of a test run, mapped to a process exit code in main()."""
    PASSED = 1
    FAILED = 2
    SKIPPED = 3
# Process exit statuses reported to the caller; 77 is the conventional
# automake "skipped test" exit code.
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
    def __init__(self):
        """Set per-run defaults, then let the subclass configure itself."""
        # Use the pre-mined cached chain unless the subclass opts out.
        self.setup_clean_chain = False
        self.nodes = []
        self.mocktime = 0
        self.supports_cli = False
        self.set_test_params()
        assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
    def main(self):
        """Entry point: parse options, run the test, clean up, and exit.

        Exits the process with TEST_EXIT_PASSED / TEST_EXIT_SKIPPED /
        TEST_EXIT_FAILED depending on the outcome.
        """
        parser = optparse.OptionParser(usage="%prog [options]")
        parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
                          help="Leave bitcoinflexds and test.* datadir on exit or error")
        parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
                          help="Don't stop bitcoinflexds after the test execution")
        parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
                          help="Source directory containing bitcoinflexd/bitcoinflex-cli (default: %default)")
        parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
                          help="Directory for caching pregenerated datadirs")
        parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
        parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
                          help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
        parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
                          help="Print out all RPC calls as they are made")
        parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
                          help="The seed to use for assigning port numbers (default: current process id)")
        parser.add_option("--coveragedir", dest="coveragedir",
                          help="Write tested RPC commands into this directory")
        parser.add_option("--configfile", dest="configfile",
                          help="Location of the test framework config file")
        parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
                          help="Attach a python debugger if test fails")
        parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
                          help="use bitcoin-cli instead of RPC for all commands")
        # Let the subclass register its own options before parsing.
        self.add_options(parser)
        (self.options, self.args) = parser.parse_args()
        PortSeed.n = self.options.port_seed
        # Put the freshly built binaries first on PATH.
        os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
        check_json_precision()
        self.options.cachedir = os.path.abspath(self.options.cachedir)
        # Set up temp directory and start logging
        if self.options.tmpdir:
            self.options.tmpdir = os.path.abspath(self.options.tmpdir)
            os.makedirs(self.options.tmpdir, exist_ok=False)
        else:
            self.options.tmpdir = tempfile.mkdtemp(prefix="test")
        self._start_logging()
        success = TestStatus.FAILED
        try:
            if self.options.usecli and not self.supports_cli:
                raise SkipTest("--usecli specified but test does not support using CLI")
            self.setup_chain()
            self.setup_network()
            time.sleep(5)
            self.run_test()
            success = TestStatus.PASSED
        except JSONRPCException as e:
            self.log.exception("JSONRPC error")
        except SkipTest as e:
            self.log.warning("Test Skipped: %s" % e.message)
            success = TestStatus.SKIPPED
        except AssertionError as e:
            self.log.exception("Assertion failed")
        except KeyError as e:
            self.log.exception("Key error")
        except Exception as e:
            self.log.exception("Unexpected exception caught during testing")
        except KeyboardInterrupt as e:
            self.log.warning("Exiting after keyboard interrupt")
        if success == TestStatus.FAILED and self.options.pdbonfailure:
            print("Testcase failed. Attaching python debugger. Enter ? for help")
            pdb.set_trace()
        # Shut nodes down (unless --noshutdown was given).
        if not self.options.noshutdown:
            self.log.info("Stopping nodes")
            if self.nodes:
                self.stop_nodes()
        else:
            for node in self.nodes:
                node.cleanup_on_exit = False
            self.log.info("Note: bitcoinflexds were not stopped and may still be running")
        # Remove the tmpdir only after a clean, fully-shut-down run.
        if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
            self.log.info("Cleaning up")
            shutil.rmtree(self.options.tmpdir)
        else:
            self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
        if success == TestStatus.PASSED:
            self.log.info("Tests successful")
            exit_code = TEST_EXIT_PASSED
        elif success == TestStatus.SKIPPED:
            self.log.info("Test skipped")
            exit_code = TEST_EXIT_SKIPPED
        else:
            self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
            self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
            exit_code = TEST_EXIT_FAILED
        logging.shutdown()
        sys.exit(exit_code)
# Methods to override in subclass test scripts.
    def set_test_params(self):
        """Override to set test parameters (at minimum self.num_nodes)."""
        raise NotImplementedError
    def add_options(self, parser):
        """Override to add command-line options to the option parser."""
        pass
    def setup_chain(self):
        """Create per-node data directories, from cache or from scratch."""
        self.log.info("Initializing test directory " + self.options.tmpdir)
        if self.setup_clean_chain:
            self._initialize_chain_clean()
        else:
            self._initialize_chain()
    def setup_network(self):
        """Start the nodes and connect them in a chain topology."""
        self.setup_nodes()
        # Connect the nodes as a "chain". This allows us
        # to split the network between nodes 1 and 2 to get
        # two halves that can work on competing chains.
        for i in range(self.num_nodes - 1):
            connect_nodes_bi(self.nodes, i, i + 1)
        self.sync_all()
    def setup_nodes(self):
        """Create and start num_nodes nodes, honouring self.extra_args if set."""
        extra_args = None
        if hasattr(self, "extra_args"):
            extra_args = self.extra_args
        self.add_nodes(self.num_nodes, extra_args)
        self.start_nodes()
    def run_test(self):
        """Override with the actual test logic."""
        raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
    def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
        """Instantiate TestNode objects (without starting them).

        extra_args and binary may be per-node lists; if supplied they must
        have exactly num_nodes entries.
        """
        if extra_args is None:
            extra_args = [[]] * num_nodes
        if binary is None:
            binary = [None] * num_nodes
        assert_equal(len(extra_args), num_nodes)
        assert_equal(len(binary), num_nodes)
        for i in range(num_nodes):
            self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
    def start_node(self, i, *args, **kwargs):
        """Start node i, wait for its RPC to come up, then record coverage."""
        node = self.nodes[i]
        node.start(*args, **kwargs)
        node.wait_for_rpc_connection()
        # NOTE(review): fixed 10 s settle delay after each node start.
        time.sleep(10)
        if self.options.coveragedir is not None:
            coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def start_nodes(self, extra_args=None, *args, **kwargs):
        """Start all nodes; if any fails to start, stop the rest and re-raise."""
        if extra_args is None:
            extra_args = [None] * self.num_nodes
        assert_equal(len(extra_args), self.num_nodes)
        try:
            for i, node in enumerate(self.nodes):
                node.start(extra_args[i], *args, **kwargs)
            for node in self.nodes:
                node.wait_for_rpc_connection()
        except:
            # If one node failed to start, stop the others
            self.stop_nodes()
            raise
        # NOTE(review): fixed 10 s settle delay after all nodes are up.
        time.sleep(10)
        if self.options.coveragedir is not None:
            for node in self.nodes:
                coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
    def stop_node(self, i):
        """Stop node i via RPC and block until its process has exited."""
        self.nodes[i].stop_node()
        self.nodes[i].wait_until_stopped()
    def stop_nodes(self):
        """Stop all running nodes: issue every stop RPC first, then wait."""
        for node in self.nodes:
            # Issue RPC to stop nodes
            node.stop_node()
        for node in self.nodes:
            # Wait for nodes to stop
            # NOTE(review): this fixed 5 s sleep runs once per node, adding
            # 5 s * num_nodes of shutdown latency before the actual wait.
            time.sleep(5)
            node.wait_until_stopped()
    def restart_node(self, i, extra_args=None):
        """Stop and restart node i, optionally with new extra_args."""
        self.stop_node(i)
        self.start_node(i, extra_args)
    def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
        """Assert that starting node i fails during initialization.

        If expected_msg is given, it must appear in the node's stderr.
        Raises AssertionError if the node starts successfully.
        """
        with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
            try:
                self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
                self.stop_node(i)
            except Exception as e:
                assert 'bitcoinflexd exited' in str(e)  # node must have shutdown
                self.nodes[i].running = False
                self.nodes[i].process = None
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8')
                    if expected_msg not in stderr:
                        raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
            else:
                # Node started cleanly -- that itself is the failure here.
                if expected_msg is None:
                    assert_msg = "bitcoinflexd should have exited with an error"
                else:
                    assert_msg = "bitcoinflexd should have exited with expected error " + expected_msg
                raise AssertionError(assert_msg)
    def wait_for_node_exit(self, i, timeout):
        """Block until node i's process exits (raises on timeout)."""
        self.nodes[i].process.wait(timeout)
    def split_network(self):
        """Split the chain-connected network into halves {0,1} and {2,3}."""
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
        self.sync_all([self.nodes[:2], self.nodes[2:]])
    def join_network(self):
        """Reconnect the halves created by split_network and sync everything."""
        connect_nodes_bi(self.nodes, 1, 2)
        self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoinflexd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "bitcoinflexd"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
for node in self.nodes:
node.wait_for_rpc_connection()
self.enable_mocktime()
block_time = self.mocktime - (201 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
sync_blocks(self.nodes)
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i)
def _initialize_chain_clean(self):
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoinflexd"),
help="bitcoinflexd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "bitcoinflexd"),
help="bitcoinflexd binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
    """This exception is raised to skip a test.

    main() catches it, logs the message, and exits with the "skipped"
    status instead of failing the run.
    """
    def __init__(self, message):
        # Forward the message to Exception so str(e)/repr(e) are informative
        # (previously str(e) was empty because super().__init__ was skipped).
        super().__init__(message)
        self.message = message
| true | true |
f731292a1a4cec34ff126f57ab5b89f88bdd9de5 | 2,733 | py | Python | test/test.py | QITI/Halftones | 8d0692b88711e858a93b90941abd6cb794a592a3 | [
"OLDAP-2.6",
"Python-2.0"
] | 4 | 2020-10-11T13:36:54.000Z | 2021-10-08T07:10:03.000Z | test/test.py | QITI/Halftones | 8d0692b88711e858a93b90941abd6cb794a592a3 | [
"OLDAP-2.6",
"Python-2.0"
] | null | null | null | test/test.py | QITI/Halftones | 8d0692b88711e858a93b90941abd6cb794a592a3 | [
"OLDAP-2.6",
"Python-2.0"
] | 2 | 2019-10-04T16:54:47.000Z | 2020-05-18T13:34:17.000Z | import halftones
from scipy.misc import *
gray = imread('lena1.jpg', True)
# halftones
jarvis = halftones.halftone.error_diffusion_jarvis(gray)
floyd_steinberg = halftones.halftone.error_diffusion_floyd_steinberg(gray)
stucki = halftones.halftone.error_diffusion_stucki(gray)
burkes = halftones.halftone.error_diffusion_burkes(gray)
sierra3 = halftones.halftone.error_diffusion_sierra3(gray)
sierra2 = halftones.halftone.error_diffusion_sierra2(gray)
sierra_simple = halftones.halftone.error_diffusion_sierra_simple(gray)
atkinson = halftones.halftone.error_diffusion_atkinson(gray)
shiaufan = halftones.halftone.error_diffusion_shiaufan(gray)
combinatorial3x3 = halftones.halftone.ordered_combinatorial3(gray)
combinatorial2x2 = halftones.halftone.ordered_combinatorial2(gray)
combinatorial4x4 = halftones.halftone.ordered_combinatorial4(gray)
OD = halftones.halftone.ordered_dithering_generalized_bayer(gray, 3)
diagonal_matrix = halftones.halftone.ordered_dithering_diagonal_ordered_matrix(gray)
clustered_dots = halftones.halftone.ordered_dithering_clustered_dots(gray)
central_white_points = halftones.halftone.ordered_dithering_central_white_point(gray)
balanced_centered_points = halftones.halftone.ordered_dithering_balanced_centered_point(gray)
dispersed_dots = halftones.halftone.ordered_dithering_dispersed_dots(gray)
# inverse halftones
inverseJarvis = halftones.inverse_halftone.inverse_fbih(jarvis)
inverseComb2x2 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial2(combinatorial2x2)
inverseComb3x3 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial3(combinatorial3x3)
inverseComb4x4 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial4(combinatorial4x4)
inverseOD = halftones.inverse_halftone.inverse_ordered_dithering_generalized(OD, 3)
# save some figures
imsave('halftone_ordered_dither_bayer.png', OD)
imsave('halftone_jarvis.png', jarvis)
imsave('halftone_stucki.png', stucki)
imsave('halftone_burkes.png', burkes)
imsave('halftone_sierra3.png', sierra3)
imsave('halftone_sierra2.png', sierra2)
imsave('halftone_sierra_simple.png', sierra_simple)
imsave('halftone_atkinson.png', atkinson)
imsave('halftone_shiaufan.png', shiaufan)
imsave('halftone_floyd_steinberg.png', floyd_steinberg)
imsave('halftone_ordered_dither_diagonal_matrix.png', diagonal_matrix)
imsave('halftone_ordered_dither_clustered_dots.png', clustered_dots)
imsave('halftone_ordered_dither_central_white_points.png', central_white_points)
imsave('halftone_ordered_dither_balanced_centered_points.png', balanced_centered_points)
imsave('halftone_ordered_dither_dispersed_dots.png', dispersed_dots)
imsave('inverse_ordered_dither.png', inverseOD)
imsave('inverse_jarvis.png', inverseJarvis)
| 52.557692 | 101 | 0.868277 | import halftones
from scipy.misc import *
gray = imread('lena1.jpg', True)
jarvis = halftones.halftone.error_diffusion_jarvis(gray)
floyd_steinberg = halftones.halftone.error_diffusion_floyd_steinberg(gray)
stucki = halftones.halftone.error_diffusion_stucki(gray)
burkes = halftones.halftone.error_diffusion_burkes(gray)
sierra3 = halftones.halftone.error_diffusion_sierra3(gray)
sierra2 = halftones.halftone.error_diffusion_sierra2(gray)
sierra_simple = halftones.halftone.error_diffusion_sierra_simple(gray)
atkinson = halftones.halftone.error_diffusion_atkinson(gray)
shiaufan = halftones.halftone.error_diffusion_shiaufan(gray)
combinatorial3x3 = halftones.halftone.ordered_combinatorial3(gray)
combinatorial2x2 = halftones.halftone.ordered_combinatorial2(gray)
combinatorial4x4 = halftones.halftone.ordered_combinatorial4(gray)
OD = halftones.halftone.ordered_dithering_generalized_bayer(gray, 3)
diagonal_matrix = halftones.halftone.ordered_dithering_diagonal_ordered_matrix(gray)
clustered_dots = halftones.halftone.ordered_dithering_clustered_dots(gray)
central_white_points = halftones.halftone.ordered_dithering_central_white_point(gray)
balanced_centered_points = halftones.halftone.ordered_dithering_balanced_centered_point(gray)
dispersed_dots = halftones.halftone.ordered_dithering_dispersed_dots(gray)
inverseJarvis = halftones.inverse_halftone.inverse_fbih(jarvis)
inverseComb2x2 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial2(combinatorial2x2)
inverseComb3x3 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial3(combinatorial3x3)
inverseComb4x4 = halftones.inverse_halftone.inverse_halftone_ordered_combinatorial4(combinatorial4x4)
inverseOD = halftones.inverse_halftone.inverse_ordered_dithering_generalized(OD, 3)
imsave('halftone_ordered_dither_bayer.png', OD)
imsave('halftone_jarvis.png', jarvis)
imsave('halftone_stucki.png', stucki)
imsave('halftone_burkes.png', burkes)
imsave('halftone_sierra3.png', sierra3)
imsave('halftone_sierra2.png', sierra2)
imsave('halftone_sierra_simple.png', sierra_simple)
imsave('halftone_atkinson.png', atkinson)
imsave('halftone_shiaufan.png', shiaufan)
imsave('halftone_floyd_steinberg.png', floyd_steinberg)
imsave('halftone_ordered_dither_diagonal_matrix.png', diagonal_matrix)
imsave('halftone_ordered_dither_clustered_dots.png', clustered_dots)
imsave('halftone_ordered_dither_central_white_points.png', central_white_points)
imsave('halftone_ordered_dither_balanced_centered_points.png', balanced_centered_points)
imsave('halftone_ordered_dither_dispersed_dots.png', dispersed_dots)
imsave('inverse_ordered_dither.png', inverseOD)
imsave('inverse_jarvis.png', inverseJarvis)
| true | true |
f7312a9191b2633265f3e838996158b974786d78 | 4,679 | py | Python | desitrip/py/desitrip/scripts/process.py | EquinoxOmega0/timedomain | 092dcda58ed380cdb41f02c1c7af33ac19c52b63 | [
"MIT"
] | 4 | 2021-02-24T15:02:35.000Z | 2022-01-18T19:24:27.000Z | desitrip/py/desitrip/scripts/process.py | MatthewPortman/timedomain | b9c6c2e6804d7dde56311d9402769be545d505d0 | [
"MIT"
] | 35 | 2020-11-06T17:51:08.000Z | 2021-10-14T01:47:16.000Z | desitrip/py/desitrip/scripts/process.py | MatthewPortman/timedomain | b9c6c2e6804d7dde56311d9402769be545d505d0 | [
"MIT"
] | 10 | 2020-03-13T20:34:15.000Z | 2021-09-23T13:35:27.000Z | #!/usr/bin/env python
"""Apply the DESITrIP CNN classifier to observed spectra,
chosen by tile ID and date.
"""
from desispec.io import read_spectra, write_spectra
from desispec.spectra import Spectra
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitrip.preproc import rebin_flux, rescale_flux
from astropy.io import fits
from astropy.table import Table, vstack, hstack
from glob import glob
from datetime import date
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from tensorflow import keras
# Command-line interface: tile, observation date, and optional CNN model.
p = ArgumentParser(description='DESITrIP data processing',
                   formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('--tile', type=int, default=0,
               help='Tile ID for processing.')
p.add_argument('--date', default=date.today().strftime('%Y%m%d'),
               help='Date of observation [YYYYMMDD]')
p.add_argument('--tfmodel', default=None,
               help='TensorFlow model HDF5 definition')
args = p.parse_args()
# Access redux folder.
redux='/global/project/projectdirs/desi/spectro/redux/daily/tiles'
prefix_in='/'.join([redux, '{:05d}'.format(args.tile), args.date])
if not os.path.isdir(prefix_in):
    raise SystemExit('{} does not exist.'.format(prefix_in))
# Set up BGS target bit selection.
cmx_bgs_bits = '|'.join([_ for _ in cmx_mask.names() if 'BGS' in _])
# List zbest and coadd files.
zbfiles = sorted(glob('{}/zbest*.fits'.format(prefix_in)))
cafiles = sorted(glob('{}/coadd*.fits'.format(prefix_in)))
# NOTE(review): 'classifier' is only bound when --tfmodel is given; any
# later use of the prediction must be guarded on args.tfmodel as well.
if args.tfmodel is not None:
    classifier = keras.models.load_model(args.tfmodel)
# Loop through zbest and coadd files for each petal.
# Extract the fibermaps, ZBEST tables, and spectra.
# Keep only BGS targets passing basic event selection.
# NOTE(review): if no files are found, or no targets pass selection in any
# file, allzbest stays None and the downstream code will fail; also the
# wavelength grid is taken from the first petal only -- presumably all
# petals share the same 'brz' grid (confirm).
allzbest = None
allfmap = None
allwave = None
allflux = None
allivar = None
allmask = None
allres = None

for cafile, zbfile in zip(cafiles, zbfiles):
    # Access data per petal.
    zbest = Table.read(zbfile, 'ZBEST')
    fibermap = Table.read(zbfile, 'FIBERMAP')
    pspectra = read_spectra(cafile)

    # Apply standard event selection: tracked targets classified as
    # galaxies that carry any BGS commissioning target bit.
    isTGT = fibermap['OBJTYPE'] == 'TGT'
    isGAL = zbest['SPECTYPE'] == 'GALAXY'
    isBGS = fibermap['CMX_TARGET'] & cmx_mask.mask(cmx_bgs_bits) != 0
    select = isTGT & isGAL & isBGS

    # Accumulate spectrum data.
    if allzbest is None:
        allzbest = zbest[select]
        allfmap = fibermap[select]
        allwave = pspectra.wave['brz']
        allflux = pspectra.flux['brz'][select]
        allivar = pspectra.ivar['brz'][select]
        allmask = pspectra.mask['brz'][select]
        allres = pspectra.resolution_data['brz'][select]
    else:
        allzbest = vstack([allzbest, zbest[select]])
        allfmap = vstack([allfmap, fibermap[select]])
        allflux = np.vstack([allflux, pspectra.flux['brz'][select]])
        allivar = np.vstack([allivar, pspectra.ivar['brz'][select]])
        allmask = np.vstack([allmask, pspectra.mask['brz'][select]])
        allres = np.vstack([allres, pspectra.resolution_data['brz'][select]])
# Apply the DESITrIP preprocessing to selected spectra.
rewave, reflux, reivar = rebin_flux(allwave, allflux, allivar, allzbest['Z'],
                                    minwave=2500., maxwave=9500., nbins=150,
                                    log=True, clip=True)
rsflux = rescale_flux(reflux)

# Run the classification only when a model was supplied.
pred = None
if args.tfmodel is not None:
    pred = classifier.predict(rsflux)

# Create output: selected target spectra.
selected_spectra = Spectra(bands=['brz'],
                           wave={'brz' : allwave},
                           flux={'brz' : allflux},
                           ivar={'brz' : allivar},
                           mask={'brz' : allmask},
                           resolution_data={'brz' : allres},
                           fibermap=allfmap)

write_spectra('selected-{}-{}.fits'.format(args.tile, args.date), selected_spectra)

# Append preprocessed spectra (and classification, if any) to output.
hx = fits.HDUList()

hdu_rewave = fits.PrimaryHDU(rewave)
hdu_rewave.header['EXTNAME'] = 'REWAVE'
hdu_rewave.header['BUNIT'] = 'Angstrom'
hdu_rewave.header['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hx.append(hdu_rewave)

hdu_reflux = fits.ImageHDU(reflux)
hdu_reflux.header['EXTNAME'] = 'REFLUX'
hx.append(hdu_reflux)

hdu_rsflux = fits.ImageHDU(rsflux)
hdu_rsflux.header['EXTNAME'] = 'RSFLUX'
hx.append(hdu_rsflux)

# BUGFIX: 'pred' was previously referenced unconditionally, raising a
# NameError whenever --tfmodel was not given (the default).
if pred is not None:
    hdu_classify = fits.ImageHDU(pred)
    hdu_classify.header['EXTNAME'] = 'OBJCLASS'
    hx.append(hdu_classify)

hx.append(fits.BinTableHDU(allzbest))
hx.writeto('reduced-{}-{}.fits'.format(args.tile, args.date), overwrite=True)
| 34.404412 | 83 | 0.674717 |
from desispec.io import read_spectra, write_spectra
from desispec.spectra import Spectra
from desitarget.cmx.cmx_targetmask import cmx_mask
from desitrip.preproc import rebin_flux, rescale_flux
from astropy.io import fits
from astropy.table import Table, vstack, hstack
from glob import glob
from datetime import date
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from tensorflow import keras
p = ArgumentParser(description='DESITrIP data processing',
formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('--tile', type=int, default=0,
help='Tile ID for processing.')
p.add_argument('--date', default=date.today().strftime('%Y%m%d'),
help='Date of observation [YYYYMMDD]')
p.add_argument('--tfmodel', default=None,
help='TensorFlow model HDF5 definition')
args = p.parse_args()
redux='/global/project/projectdirs/desi/spectro/redux/daily/tiles'
prefix_in='/'.join([redux, '{:05d}'.format(args.tile), args.date])
if not os.path.isdir(prefix_in):
raise SystemExit('{} does not exist.'.format(prefix_in))
cmx_bgs_bits = '|'.join([_ for _ in cmx_mask.names() if 'BGS' in _])
zbfiles = sorted(glob('{}/zbest*.fits'.format(prefix_in)))
cafiles = sorted(glob('{}/coadd*.fits'.format(prefix_in)))
if args.tfmodel is not None:
classifier = keras.models.load_model(args.tfmodel)
allzbest = None
allfmap = None
allwave = None
allflux = None
allivar = None
allmask = None
allres = None
for cafile, zbfile in zip(cafiles, zbfiles):
zbest = Table.read(zbfile, 'ZBEST')
fibermap = Table.read(zbfile, 'FIBERMAP')
pspectra = read_spectra(cafile)
isTGT = fibermap['OBJTYPE'] == 'TGT'
isGAL = zbest['SPECTYPE'] == 'GALAXY'
isBGS = fibermap['CMX_TARGET'] & cmx_mask.mask(cmx_bgs_bits) != 0
select = isTGT & isGAL & isBGS
if allzbest is None:
allzbest = zbest[select]
allfmap = fibermap[select]
allwave = pspectra.wave['brz']
allflux = pspectra.flux['brz'][select]
allivar = pspectra.ivar['brz'][select]
allmask = pspectra.mask['brz'][select]
allres = pspectra.resolution_data['brz'][select]
else:
allzbest = vstack([allzbest, zbest[select]])
allfmap = vstack([allfmap, fibermap[select]])
allflux = np.vstack([allflux, pspectra.flux['brz'][select]])
allivar = np.vstack([allivar, pspectra.ivar['brz'][select]])
allmask = np.vstack([allmask, pspectra.mask['brz'][select]])
allres = np.vstack([allres, pspectra.resolution_data['brz'][select]])
# Apply the DESITrIP preprocessing to selected spectra.
rewave, reflux, reivar = rebin_flux(allwave, allflux, allivar, allzbest['Z'],
                                    minwave=2500., maxwave=9500., nbins=150,
                                    log=True, clip=True)
rsflux = rescale_flux(reflux)

# Run the classification only when a model was supplied.
pred = None
if args.tfmodel is not None:
    pred = classifier.predict(rsflux)

# Create output: selected target spectra.
selected_spectra = Spectra(bands=['brz'],
                           wave={'brz' : allwave},
                           flux={'brz' : allflux},
                           ivar={'brz' : allivar},
                           mask={'brz' : allmask},
                           resolution_data={'brz' : allres},
                           fibermap=allfmap)

write_spectra('selected-{}-{}.fits'.format(args.tile, args.date), selected_spectra)

# Append preprocessed spectra (and classification, if any) to output.
hx = fits.HDUList()

hdu_rewave = fits.PrimaryHDU(rewave)
hdu_rewave.header['EXTNAME'] = 'REWAVE'
hdu_rewave.header['BUNIT'] = 'Angstrom'
hdu_rewave.header['AIRORVAC'] = ('vac', 'Vacuum wavelengths')
hx.append(hdu_rewave)

hdu_reflux = fits.ImageHDU(reflux)
hdu_reflux.header['EXTNAME'] = 'REFLUX'
hx.append(hdu_reflux)

hdu_rsflux = fits.ImageHDU(rsflux)
hdu_rsflux.header['EXTNAME'] = 'RSFLUX'
hx.append(hdu_rsflux)

# BUGFIX: 'pred' was previously referenced unconditionally, raising a
# NameError whenever --tfmodel was not given (the default).
if pred is not None:
    hdu_classify = fits.ImageHDU(pred)
    hdu_classify.header['EXTNAME'] = 'OBJCLASS'
    hx.append(hdu_classify)

hx.append(fits.BinTableHDU(allzbest))
hx.writeto('reduced-{}-{}.fits'.format(args.tile, args.date), overwrite=True)
| true | true |
f7312aa4fd65012229519ec351624dc9f6a06ed9 | 3,060 | py | Python | ethtx_ce/backend/processors/abi_processor/balances.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | 1 | 2021-07-26T11:05:24.000Z | 2021-07-26T11:05:24.000Z | ethtx_ce/backend/processors/abi_processor/balances.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | null | null | null | ethtx_ce/backend/processors/abi_processor/balances.py | daifoundation/ethtx_ce | 5aa0de0e33f6af569e840554b024116c9a21d54b | [
"Apache-2.0"
] | 1 | 2021-07-26T11:05:32.000Z | 2021-07-26T11:05:32.000Z | # Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import List
from .abc import ABISubmoduleAbc
from ...models.decoded_model import DecodedTransfer, DecodedBalance, AddressInfo
ZERO_ADDRESS = "0x" + 40 * "0"


class ABIBalancesDecoder(ABISubmoduleAbc):
    """ Abi Balances Decoder. """

    def decode(self, transfers: List[DecodedTransfer]) -> List:
        """Aggregate transfers into per-holder, per-token net balance changes."""
        # First pass: collect holder display names and token metadata.
        # The zero address is never recorded as a holder.
        holder_names = {}
        token_info = {}
        for transfer in transfers:
            if transfer.from_address.address != ZERO_ADDRESS:
                holder_names[transfer.from_address.address] = transfer.from_address.name
            if transfer.to_address.address != ZERO_ADDRESS:
                holder_names[transfer.to_address.address] = transfer.to_address.name
            token_info[transfer.token_address] = (
                transfer.token_standard,
                transfer.token_symbol,
            )

        # Second pass: net value moved per (holder, token).
        sheet = {address: defaultdict(int) for address in holder_names}
        for transfer in transfers:
            if transfer.from_address.address != ZERO_ADDRESS:
                sheet[transfer.from_address.address][transfer.token_address] -= transfer.value
            if transfer.to_address.address != ZERO_ADDRESS:
                sheet[transfer.to_address.address][transfer.token_address] += transfer.value

        # Emit one DecodedBalance per holder, skipping tokens whose inflows
        # and outflows cancel out and holders with no net change at all.
        balances = []
        for address, name in holder_names.items():
            tokens = [
                dict(
                    token_address=token_address,
                    token_symbol=token_info[token_address][1],
                    token_standard=token_info[token_address][0],
                    balance=delta,
                )
                for token_address, delta in sheet[address].items()
                if delta
            ]
            if tokens:
                balances.append(
                    DecodedBalance(holder=AddressInfo(address, name), tokens=tokens)
                )
        return balances
| 39.230769 | 88 | 0.605556 |
from collections import defaultdict
from typing import List
from .abc import ABISubmoduleAbc
from ...models.decoded_model import DecodedTransfer, DecodedBalance, AddressInfo
ZERO_ADDRESS = "0x" + 40 * "0"


class ABIBalancesDecoder(ABISubmoduleAbc):
    """Computes net token balance changes per holder from decoded transfers."""

    def decode(self, transfers: List[DecodedTransfer]) -> List:
        """Return a list of DecodedBalance, one per holder with a non-zero change."""
        # Holder address -> display name; the zero address is excluded from
        # the holder set on both sides of every transfer.
        balance_holders = dict()
        # Token address -> (standard, symbol), used to label token entries.
        balance_tokens = dict()
        for transfer in transfers:
            if transfer.from_address.address != ZERO_ADDRESS:
                balance_holders[
                    transfer.from_address.address
                ] = transfer.from_address.name
            if transfer.to_address.address != ZERO_ADDRESS:
                balance_holders[transfer.to_address.address] = transfer.to_address.name
            balance_tokens[transfer.token_address] = (
                transfer.token_standard,
                transfer.token_symbol,
            )
        # Holder address -> token address -> net value moved.
        balance_sheet: dict = {address: defaultdict(int) for address in balance_holders}
        for transfer in transfers:
            if transfer.from_address.address != ZERO_ADDRESS:
                balance_sheet[transfer.from_address.address][
                    transfer.token_address
                ] -= transfer.value
            if transfer.to_address.address != ZERO_ADDRESS:
                balance_sheet[transfer.to_address.address][
                    transfer.token_address
                ] += transfer.value
        balances = []
        for holder_address in balance_holders:
            tokens = []
            for token_address in balance_sheet[holder_address]:
                # Skip tokens whose inflows and outflows cancel out exactly.
                if balance_sheet[holder_address][token_address]:
                    token_standard, token_symbol = balance_tokens[token_address]
                    tokens.append(
                        dict(
                            token_address=token_address,
                            token_symbol=token_symbol,
                            token_standard=token_standard,
                            balance=balance_sheet[holder_address][token_address],
                        )
                    )
            if tokens:
                holder_name = balance_holders[holder_address]
                balances.append(
                    DecodedBalance(
                        holder=AddressInfo(holder_address, holder_name), tokens=tokens
                    )
                )
        return balances
| true | true |
f7312b9e3d53282b3517fa0a599b4e1dc17e254b | 827 | py | Python | ehome/utils/response_code.py | gavinliu4011/eHome | 2fb06a40ba7092835bd0904145086868cb9d45ed | [
"Apache-2.0"
] | 4 | 2018-07-12T11:49:05.000Z | 2020-03-23T15:14:15.000Z | ehome/utils/response_code.py | gavinliu4011/eHome | 2fb06a40ba7092835bd0904145086868cb9d45ed | [
"Apache-2.0"
] | null | null | null | ehome/utils/response_code.py | gavinliu4011/eHome | 2fb06a40ba7092835bd0904145086868cb9d45ed | [
"Apache-2.0"
] | null | null | null | class RET:
OK = '0'
DBERR = '4001'
NODATA = '4002'
DATAEXIST = '4003'
DATAERR = '4004'
SESSIONERR = '4101'
LOGINERR = '4102'
PARAMERR = '4103'
USERERR = '4104'
ROLEERR = '4105'
PWDERR = '4106'
REQERR = '4201'
IPERR = '4202'
THIRDERR = '4301'
IOERR = '4302'
SERVERERR = '4500'
UNKOWNERR = '4501'
# Human-readable (Chinese) message for each RET status code, used when
# building API error responses.
error_map = {
    RET.OK: '成功',
    RET.DBERR: '数据库查询错误',
    RET.NODATA: '无数据',
    RET.DATAEXIST: '数据已存在',
    RET.DATAERR: '数据错误',
    RET.SESSIONERR: '用户未登录',
    RET.LOGINERR: '用户登录失败',
    RET.PARAMERR: '参数错误',
    RET.USERERR: '用户不存在或未激活',
    RET.ROLEERR: '用户身份错误',
    RET.PWDERR: '密码错误',
    RET.REQERR: '非法请求或请求次数受限',
    RET.IPERR: 'IP受限',
    RET.THIRDERR: '第三方系统错误',
    RET.IOERR: '文件读写错误',
    RET.SERVERERR: '内部错误',
    RET.UNKOWNERR: '未知错误',
}
| 20.675 | 30 | 0.553809 | class RET:
OK = '0'
DBERR = '4001'
NODATA = '4002'
DATAEXIST = '4003'
DATAERR = '4004'
SESSIONERR = '4101'
LOGINERR = '4102'
PARAMERR = '4103'
USERERR = '4104'
ROLEERR = '4105'
PWDERR = '4106'
REQERR = '4201'
IPERR = '4202'
THIRDERR = '4301'
IOERR = '4302'
SERVERERR = '4500'
UNKOWNERR = '4501'
# Human-readable (Chinese) message for each RET status code, used when
# building API error responses.
error_map = {
    RET.OK: '成功',
    RET.DBERR: '数据库查询错误',
    RET.NODATA: '无数据',
    RET.DATAEXIST: '数据已存在',
    RET.DATAERR: '数据错误',
    RET.SESSIONERR: '用户未登录',
    RET.LOGINERR: '用户登录失败',
    RET.PARAMERR: '参数错误',
    RET.USERERR: '用户不存在或未激活',
    RET.ROLEERR: '用户身份错误',
    RET.PWDERR: '密码错误',
    RET.REQERR: '非法请求或请求次数受限',
    RET.IPERR: 'IP受限',
    RET.THIRDERR: '第三方系统错误',
    RET.IOERR: '文件读写错误',
    RET.SERVERERR: '内部错误',
    RET.UNKOWNERR: '未知错误',
}
| true | true |
f7312e1e9ac00ea57e3041da27b8c0ce92fd33e4 | 3,146 | py | Python | manabi/apps/books/models.py | aehlke/manabi | 1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b | [
"MIT"
] | 14 | 2015-10-03T07:34:28.000Z | 2021-09-20T07:10:29.000Z | manabi/apps/books/models.py | aehlke/manabi | 1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b | [
"MIT"
] | 23 | 2019-10-25T08:47:23.000Z | 2022-01-30T02:00:45.000Z | manabi/apps/books/models.py | aehlke/manabi | 1dfdd4ecb9c1214b6a70268be0dcfeda9da8754b | [
"MIT"
] | 7 | 2016-10-04T08:10:36.000Z | 2021-09-20T07:10:33.000Z | from functools import wraps
from urllib.error import URLError
from django.db import models
from django.urls import reverse
#from amazonproduct import API as AmazonAPI
from manabi.apps.utils.slugs import slugify
from django.conf import settings
#TODO-OLD find different way.
#amazon_api = AmazonAPI(settings.AWS_KEY, settings.AWS_SECRET_KEY, 'us')
class DeckedTextbookManager(models.Manager):
    """Manager restricted to textbooks that have at least one active, shared deck."""

    def get_queryset(self):
        # Bug fix: the override was named ``get_query_set`` (pre-Django-1.6
        # hook), but this module imports ``django.urls.reverse`` and so runs
        # on Django >= 2.0, where the hook is ``get_queryset`` — the old name
        # was never called and ``super().get_query_set()`` does not exist.
        return super(DeckedTextbookManager, self).get_queryset().filter(
            deck__active=True, deck__shared=True).distinct()

    # Backward-compatible alias for any caller still using the legacy name.
    get_query_set = get_queryset
def uses_amazon_api(func):
    """Decorator guarding Amazon product lookups.

    Raises:
        Exception: if ``self.isbn`` is empty/falsy.
    """
    @wraps(func)
    def guarded(self, *args, **kwargs):
        if self.isbn:
            return func(self, *args, **kwargs)
        raise Exception('Textbook has no ISBN.')
    return guarded
class Textbook(models.Model):
    """A textbook identified by ISBN, with metadata fetched via the Amazon API.

    ``objects`` is the default manager; ``decked_objects`` only yields
    textbooks that have at least one active, shared deck.
    """

    objects = models.Manager()
    decked_objects = DeckedTextbookManager()

    slug = models.SlugField(blank=True)  # Defaults to max_length=50
    isbn = models.CharField(max_length=13)
    custom_title = models.CharField(max_length=200, blank=True,
        help_text='Set this to override the Amazon product name.')
    #TODO-OLD student level field

    class Meta:
        app_label = 'flashcards'

    def __str__(self):
        # Renamed from the Python 2 ``__unicode__`` hook: this module already
        # targets Python 3 (it imports ``urllib.error``), where only
        # ``__str__`` is used.
        try:
            return self.get_basic_info()['title'] + ' [{0}]'.format(self.isbn)
        except URLError:
            return 'ISBN: {0}'.format(self.isbn)

    # Backward-compatible alias for legacy Python 2 callers.
    __unicode__ = __str__

    def save(self, *args, **kwargs):
        """Derive ``slug`` from the (possibly overridden) title before saving."""
        title = self.get_basic_info()['title']
        self.slug = slugify(title)
        super(Textbook, self).save(*args, **kwargs)

    @property
    def shared_decks(self):
        """Decks for this textbook that are both active and shared."""
        return self.deck_set.filter(
            active=True, shared=True)

    def get_absolute_url(self):
        # Bug fix: ``reverse`` takes URL parameters via the ``kwargs``
        # keyword; the old ``reverse(name, (), {...})`` call passed the dict
        # positionally as ``args`` (a leftover of the removed
        # ``@models.permalink`` tuple style) and could never resolve.
        if self.slug:
            return reverse('book_detail_with_slug', kwargs={
                'object_id': self.id,
                'slug': self.slug,
            })
        else:
            return reverse('book_detail_without_slug', kwargs={
                'object_id': self.id,
            })

    @property
    def cleaned_isbn(self):
        """ISBN with surrounding whitespace and all dashes removed."""
        return self.isbn.strip().replace('-', '')

    def _item_lookup(self, **kwargs):
        # Amazon lookups are currently disabled (``amazon_api`` is commented
        # out at module top): the bare ``return`` makes the call below
        # unreachable, so this always returns None and the callers below
        # will fail with AttributeError until it is restored.
        return
        #TODO-OLD fix
        return amazon_api.item_lookup(
            self.cleaned_isbn, IdType='ISBN', SearchIndex='Books', **kwargs)

    @uses_amazon_api
    def get_image_urls(self):
        '''
        Returns a dict with each available image size:
            {'size': 'url'}
        '''
        urls = {}
        root = self._item_lookup(ResponseGroup='Images')
        for size in ('Small', 'Medium', 'Large'):
            urls[size.lower()] = getattr(root.Items.Item, size + 'Image').URL.pyval
        return urls

    @uses_amazon_api
    def get_basic_info(self):
        '''
        Returns the following in a dict:
            author
            title (``custom_title`` wins when set)
        '''
        root = self._item_lookup(ResponseGroup='Small')
        attribs = root.Items.Item.ItemAttributes
        return {
            'author': attribs.Author.pyval,
            'title': self.custom_title or attribs.Title.pyval,
        }
| 28.862385 | 83 | 0.608392 | from functools import wraps
from urllib.error import URLError
from django.db import models
from django.urls import reverse
from manabi.apps.utils.slugs import slugify
from django.conf import settings
class DeckedTextbookManager(models.Manager):
    """Manager limited to textbooks having at least one active, shared deck."""

    def get_query_set(self):
        # NOTE(review): Django renamed this hook to ``get_queryset`` in 1.6;
        # with ``django.urls`` imported above this module targets Django >= 2.0,
        # so this override is presumably never invoked — confirm and rename.
        return super(DeckedTextbookManager, self).get_query_set().filter(
            deck__active=True, deck__shared=True).distinct()
def uses_amazon_api(func):
    """Decorator for methods that hit the Amazon product API.

    Raises:
        Exception: if the textbook has no (truthy) ISBN set.
    """
    @wraps(func)
    def wrapped(self, *args, **kwargs):
        if not self.isbn:
            raise Exception('Textbook has no ISBN.')
        return func(self, *args, **kwargs)
    return wrapped
class Textbook(models.Model):
    """A textbook identified by ISBN, with metadata fetched via the Amazon API."""

    objects = models.Manager()
    # Only yields textbooks with at least one active, shared deck.
    decked_objects = DeckedTextbookManager()
    slug = models.SlugField(blank=True)  # SlugField defaults to max_length=50
    isbn = models.CharField(max_length=13)
    custom_title = models.CharField(max_length=200, blank=True,
        help_text='Set this to override the Amazon product name.')

    class Meta:
        app_label = 'flashcards'

    def __unicode__(self):
        # NOTE(review): Python 2 hook; this module imports ``urllib.error``
        # (Python 3), where only ``__str__`` is used — confirm and rename.
        try:
            return self.get_basic_info()['title'] + ' [{0}]'.format(self.isbn)
        except URLError:
            return 'ISBN: {0}'.format(self.isbn)

    def save(self, *args, **kwargs):
        """Derive ``slug`` from the title before delegating to Model.save."""
        title = self.get_basic_info()['title']
        self.slug = slugify(title)
        super(Textbook, self).save(*args, **kwargs)

    @property
    def shared_decks(self):
        """Decks for this textbook that are both active and shared."""
        return self.deck_set.filter(
            active=True, shared=True)

    def get_absolute_url(self):
        # NOTE(review): ``reverse(name, (), {...})`` passes the dict as the
        # positional ``args`` parameter, not ``kwargs`` — this looks like a
        # leftover of the removed ``@models.permalink`` tuple style and
        # presumably cannot resolve; confirm and switch to ``kwargs={...}``.
        if self.slug:
            return reverse('book_detail_with_slug', (), {
                'object_id': self.id,
                'slug': self.slug,
            })
        else:
            return reverse('book_detail_without_slug', (), {
                'object_id': self.id,
            })

    @property
    def cleaned_isbn(self):
        """ISBN with surrounding whitespace and all dashes removed."""
        return self.isbn.strip().replace('-', '')

    def _item_lookup(self, **kwargs):
        # Amazon lookups are disabled: the bare ``return`` makes the call
        # below unreachable, so this always returns None (callers below
        # would then fail with AttributeError).
        return
        return amazon_api.item_lookup(
            self.cleaned_isbn, IdType='ISBN', SearchIndex='Books', **kwargs)

    @uses_amazon_api
    def get_image_urls(self):
        """Return ``{'small'|'medium'|'large': image URL}`` for the Amazon item."""
        urls = {}
        root = self._item_lookup(ResponseGroup='Images')
        for size in ('Small', 'Medium', 'Large'):
            urls[size.lower()] = getattr(root.Items.Item, size + 'Image').URL.pyval
        return urls

    @uses_amazon_api
    def get_basic_info(self):
        """Return ``{'author': ..., 'title': ...}``; ``custom_title`` wins if set."""
        root = self._item_lookup(ResponseGroup='Small')
        attribs = root.Items.Item.ItemAttributes
        return {
            'author': attribs.Author.pyval,
            'title': self.custom_title or attribs.Title.pyval,
        }
| true | true |
f7312ef87c220e201a4ce0da73de12af13f9c8a5 | 17,506 | py | Python | packages/python/plotly/plotly/validators/_surface.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/_surface.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/_surface.py | labaran1/plotly.py | 7ec751e8fed4a570c11ea4bea2231806389d62eb | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class SurfaceValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``surface`` trace type.

    Appears auto-generated (plotly.py validator codegen): it binds the
    trace name to its data class ("Surface") and carries the per-attribute
    reference text in ``data_docs``.
    """

    def __init__(self, plotly_name="surface", parent_name="", **kwargs):
        super(SurfaceValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Surface"),
            # The triple-quoted block below is runtime documentation data
            # (attribute reference shown to users), not a docstring.
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `colorscale`. In case
                `colorscale` is unspecified or `autocolorscale`
                is true, the default palette will be chosen
                according to whether numbers in the `color`
                array are all positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here z
                or surfacecolor) or the bounds set in `cmin`
                and `cmax` Defaults to `false` when `cmin` and
                `cmax` are set by the user.
            cmax
                Sets the upper bound of the color domain. Value
                should have the same units as z or surfacecolor
                and if set, `cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `cmin` and/or `cmax` to be equidistant
                to this point. Value should have the same units
                as z or surfacecolor. Has no effect when
                `cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Value
                should have the same units as z or surfacecolor
                and if set, `cmax` must be set as well.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorbar
                :class:`plotly.graph_objects.surface.ColorBar`
                instance or dict with compatible properties
            colorscale
                Sets the colorscale. The colorscale must be an
                array containing arrays mapping a normalized
                value to an rgb, rgba, hex, hsl, hsv, or named
                color string. At minimum, a mapping for the
                lowest (0) and highest (1) values are required.
                For example, `[[0, 'rgb(0,0,255)'], [1,
                'rgb(255,0,0)']]`. To control the bounds of the
                colorscale in color space, use `cmin` and
                `cmax`. Alternatively, `colorscale` may be a
                palette name string of the following list: Blac
                kbody,Bluered,Blues,Cividis,Earth,Electric,Gree
                ns,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,R
                eds,Viridis,YlGnBu,YlOrRd.
            connectgaps
                Determines whether or not gaps (i.e. {nan} or
                missing values) in the `z` data are filled in.
            contours
                :class:`plotly.graph_objects.surface.Contours`
                instance or dict with compatible properties
            customdata
                Assigns extra data each datum. This may be
                useful when listening to hover, click and
                selection events. Note that, "scatter" traces
                also appends customdata items in the markers
                DOM elements
            customdatasrc
                Sets the source reference on Chart Studio Cloud
                for `customdata`.
            hidesurface
                Determines whether or not a surface is drawn.
                For example, set `hidesurface` to False
                `contours.x.show` to True and `contours.y.show`
                to True to draw a wire frame plot.
            hoverinfo
                Determines which trace information appear on
                hover. If `none` or `skip` are set, no
                information is displayed upon hovering. But, if
                `none` is set, click and hover events are still
                fired.
            hoverinfosrc
                Sets the source reference on Chart Studio Cloud
                for `hoverinfo`.
            hoverlabel
                :class:`plotly.graph_objects.surface.Hoverlabel
                ` instance or dict with compatible properties
            hovertemplate
                Template string used for rendering the
                information that appear on hover box. Note that
                this will override `hoverinfo`. Variables are
                inserted using %{variable}, for example "y:
                %{y}" as well as %{xother}, {%_xother},
                {%_xother_}, {%xother_}. When showing info for
                several points, "xother" will be added to those
                with different x positions from the first
                point. An underscore before or after
                "(x|y)other" will add a space on that side,
                only when this field is shown. Numbers are
                formatted using d3-format's syntax
                %{variable:d3-format}, for example "Price:
                %{y:$.2f}". https://github.com/d3/d3-format/tre
                e/v1.4.5#d3-format for details on the
                formatting syntax. Dates are formatted using
                d3-time-format's syntax %{variable|d3-time-
                format}, for example "Day: %{2019-01-01|%A}".
                https://github.com/d3/d3-time-
                format/tree/v2.2.3#locale_format for details on
                the date formatting syntax. The variables
                available in `hovertemplate` are the ones
                emitted as event data described at this link
                https://plotly.com/javascript/plotlyjs-
                events/#event-data. Additionally, every
                attributes that can be specified per-point (the
                ones that are `arrayOk: true`) are available.
                Anything contained in tag `<extra>` is
                displayed in the secondary box, for example
                "<extra>{fullData.name}</extra>". To hide the
                secondary box completely, use an empty tag
                `<extra></extra>`.
            hovertemplatesrc
                Sets the source reference on Chart Studio Cloud
                for `hovertemplate`.
            hovertext
                Same as `text`.
            hovertextsrc
                Sets the source reference on Chart Studio Cloud
                for `hovertext`.
            ids
                Assigns id labels to each datum. These ids for
                object constancy of data points during
                animation. Should be an array of strings, not
                numbers or any other type.
            idssrc
                Sets the source reference on Chart Studio Cloud
                for `ids`.
            legendgroup
                Sets the legend group for this trace. Traces
                part of the same legend group hide/show at the
                same time when toggling legend items.
            legendgrouptitle
                :class:`plotly.graph_objects.surface.Legendgrou
                ptitle` instance or dict with compatible
                properties
            legendrank
                Sets the legend rank for this trace. Items and
                groups with smaller ranks are presented on
                top/left side while with `*reversed*
                `legend.traceorder` they are on bottom/right
                side. The default legendrank is 1000, so that
                you can use ranks less than 1000 to place
                certain items before all unranked items, and
                ranks greater than 1000 to go after all
                unranked items.
            lighting
                :class:`plotly.graph_objects.surface.Lighting`
                instance or dict with compatible properties
            lightposition
                :class:`plotly.graph_objects.surface.Lightposit
                ion` instance or dict with compatible
                properties
            meta
                Assigns extra meta information associated with
                this trace that can be used in various text
                attributes. Attributes such as trace `name`,
                graph, axis and colorbar `title.text`,
                annotation `text` `rangeselector`,
                `updatemenues` and `sliders` `label` text all
                support `meta`. To access the trace `meta`
                values in an attribute in the same trace,
                simply use `%{meta[i]}` where `i` is the index
                or key of the `meta` item in question. To
                access trace `meta` in layout attributes, use
                `%{data[n[.meta[i]}` where `i` is the index or
                key of the `meta` and `n` is the trace index.
            metasrc
                Sets the source reference on Chart Studio Cloud
                for `meta`.
            name
                Sets the trace name. The trace name appear as
                the legend item and on hover.
            opacity
                Sets the opacity of the surface. Please note
                that in the case of using high `opacity` values
                for example a value greater than or equal to
                0.5 on two surfaces (and 0.25 with four
                surfaces), an overlay of multiple transparent
                surfaces may not perfectly be sorted in depth
                by the webgl API. This behavior may be improved
                in the near future and is subject to change.
            opacityscale
                Sets the opacityscale. The opacityscale must be
                an array containing arrays mapping a normalized
                value to an opacity value. At minimum, a
                mapping for the lowest (0) and highest (1)
                values are required. For example, `[[0, 1],
                [0.5, 0.2], [1, 1]]` means that higher/lower
                values would have higher opacity values and
                those in the middle would be more transparent
                Alternatively, `opacityscale` may be a palette
                name string of the following list: 'min',
                'max', 'extremes' and 'uniform'. The default is
                'uniform'.
            reversescale
                Reverses the color mapping if true. If true,
                `cmin` will correspond to the last color in the
                array and `cmax` will correspond to the first
                color.
            scene
                Sets a reference between this trace's 3D
                coordinate system and a 3D scene. If "scene"
                (the default value), the (x,y,z) coordinates
                refer to `layout.scene`. If "scene2", the
                (x,y,z) coordinates refer to `layout.scene2`,
                and so on.
            showlegend
                Determines whether or not an item corresponding
                to this trace is shown in the legend.
            showscale
                Determines whether or not a colorbar is
                displayed for this trace.
            stream
                :class:`plotly.graph_objects.surface.Stream`
                instance or dict with compatible properties
            surfacecolor
                Sets the surface color values, used for setting
                a color scale independent of `z`.
            surfacecolorsrc
                Sets the source reference on Chart Studio Cloud
                for `surfacecolor`.
            text
                Sets the text elements associated with each z
                value. If trace `hoverinfo` contains a "text"
                flag and "hovertext" is not set, these elements
                will be seen in the hover labels.
            textsrc
                Sets the source reference on Chart Studio Cloud
                for `text`.
            uid
                Assign an id to this trace, Use this to provide
                object constancy between traces during
                animations and transitions.
            uirevision
                Controls persistence of some user-driven
                changes to the trace: `constraintrange` in
                `parcoords` traces, as well as some `editable:
                true` modifications such as `name` and
                `colorbar.title`. Defaults to
                `layout.uirevision`. Note that other user-
                driven trace attribute changes are controlled
                by `layout` attributes: `trace.visible` is
                controlled by `layout.legend.uirevision`,
                `selectedpoints` is controlled by
                `layout.selectionrevision`, and
                `colorbar.(x|y)` (accessible with `config:
                {editable: true}`) is controlled by
                `layout.editrevision`. Trace changes are
                tracked by `uid`, which only falls back on
                trace index if no `uid` is provided. So if your
                app can add/remove traces before the end of the
                `data` array, such that the same trace has a
                different index, you can still preserve user-
                driven changes if you give each trace a `uid`
                that stays with it as it moves.
            visible
                Determines whether or not this trace is
                visible. If "legendonly", the trace is not
                drawn, but can appear as a legend item
                (provided that the legend itself is visible).
            x
                Sets the x coordinates.
            xcalendar
                Sets the calendar system to use with `x` date
                data.
            xhoverformat
                Sets the hover text formatting rulefor `x`
                using d3 formatting mini-languages which are
                very similar to those in Python. For numbers,
                see: https://github.com/d3/d3-format/tree/v1.4.
                5#d3-format. And for dates see:
                https://github.com/d3/d3-time-
                format/tree/v2.2.3#locale_format. We add two
                items to d3's date formatter: "%h" for half of
                the year as a decimal number as well as "%{n}f"
                for fractional seconds with n digits. For
                example, *2016-10-13 09:15:23.456* with
                tickformat "%H~%M~%S.%2f" would display
                *09~15~23.46*By default the values are
                formatted using `xaxis.hoverformat`.
            xsrc
                Sets the source reference on Chart Studio Cloud
                for `x`.
            y
                Sets the y coordinates.
            ycalendar
                Sets the calendar system to use with `y` date
                data.
            yhoverformat
                Sets the hover text formatting rulefor `y`
                using d3 formatting mini-languages which are
                very similar to those in Python. For numbers,
                see: https://github.com/d3/d3-format/tree/v1.4.
                5#d3-format. And for dates see:
                https://github.com/d3/d3-time-
                format/tree/v2.2.3#locale_format. We add two
                items to d3's date formatter: "%h" for half of
                the year as a decimal number as well as "%{n}f"
                for fractional seconds with n digits. For
                example, *2016-10-13 09:15:23.456* with
                tickformat "%H~%M~%S.%2f" would display
                *09~15~23.46*By default the values are
                formatted using `yaxis.hoverformat`.
            ysrc
                Sets the source reference on Chart Studio Cloud
                for `y`.
            z
                Sets the z coordinates.
            zcalendar
                Sets the calendar system to use with `z` date
                data.
            zhoverformat
                Sets the hover text formatting rulefor `z`
                using d3 formatting mini-languages which are
                very similar to those in Python. For numbers,
                see: https://github.com/d3/d3-format/tree/v1.4.
                5#d3-format. And for dates see:
                https://github.com/d3/d3-time-
                format/tree/v2.2.3#locale_format. We add two
                items to d3's date formatter: "%h" for half of
                the year as a decimal number as well as "%{n}f"
                for fractional seconds with n digits. For
                example, *2016-10-13 09:15:23.456* with
                tickformat "%H~%M~%S.%2f" would display
                *09~15~23.46*By default the values are
                formatted using `zaxis.hoverformat`.
            zsrc
                Sets the source reference on Chart Studio Cloud
                for `z`.
""",
            ),
            **kwargs,
        )
| 49.451977 | 72 | 0.542214 | import _plotly_utils.basevalidators
class SurfaceValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="surface", parent_name="", **kwargs):
super(SurfaceValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Surface"),
data_docs=kwargs.pop(
"data_docs",
"""
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `colorscale`. In case
`colorscale` is unspecified or `autocolorscale`
is true, the default palette will be chosen
according to whether numbers in the `color`
array are all positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here z
or surfacecolor) or the bounds set in `cmin`
and `cmax` Defaults to `false` when `cmin` and
`cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Value
should have the same units as z or surfacecolor
and if set, `cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `cmin` and/or `cmax` to be equidistant
to this point. Value should have the same units
as z or surfacecolor. Has no effect when
`cauto` is `false`.
cmin
Sets the lower bound of the color domain. Value
should have the same units as z or surfacecolor
and if set, `cmax` must be set as well.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.surface.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an
array containing arrays mapping a normalized
value to an rgb, rgba, hex, hsl, hsv, or named
color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required.
For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use `cmin` and
`cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Blac
kbody,Bluered,Blues,Cividis,Earth,Electric,Gree
ns,Greys,Hot,Jet,Picnic,Portland,Rainbow,RdBu,R
eds,Viridis,YlGnBu,YlOrRd.
connectgaps
Determines whether or not gaps (i.e. {nan} or
missing values) in the `z` data are filled in.
contours
:class:`plotly.graph_objects.surface.Contours`
instance or dict with compatible properties
customdata
Assigns extra data each datum. This may be
useful when listening to hover, click and
selection events. Note that, "scatter" traces
also appends customdata items in the markers
DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud
for `customdata`.
hidesurface
Determines whether or not a surface is drawn.
For example, set `hidesurface` to False
`contours.x.show` to True and `contours.y.show`
to True to draw a wire frame plot.
hoverinfo
Determines which trace information appear on
hover. If `none` or `skip` are set, no
information is displayed upon hovering. But, if
`none` is set, click and hover events are still
fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud
for `hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.surface.Hoverlabel
` instance or dict with compatible properties
hovertemplate
Template string used for rendering the
information that appear on hover box. Note that
this will override `hoverinfo`. Variables are
inserted using %{variable}, for example "y:
%{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for
several points, "xother" will be added to those
with different x positions from the first
point. An underscore before or after
"(x|y)other" will add a space on that side,
only when this field is shown. Numbers are
formatted using d3-format's syntax
%{variable:d3-format}, for example "Price:
%{y:$.2f}". https://github.com/d3/d3-format/tre
e/v1.4.5#d3-format for details on the
formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on
the date formatting syntax. The variables
available in `hovertemplate` are the ones
emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-
events/#event-data. Additionally, every
attributes that can be specified per-point (the
ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud
for `hovertemplate`.
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud
for `hovertext`.
ids
Assigns id labels to each datum. These ids for
object constancy of data points during
animation. Should be an array of strings, not
numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud
for `ids`.
legendgroup
Sets the legend group for this trace. Traces
part of the same legend group hide/show at the
same time when toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.surface.Legendgrou
ptitle` instance or dict with compatible
properties
legendrank
Sets the legend rank for this trace. Items and
groups with smaller ranks are presented on
top/left side while with `*reversed*
`legend.traceorder` they are on bottom/right
side. The default legendrank is 1000, so that
you can use ranks less than 1000 to place
certain items before all unranked items, and
ranks greater than 1000 to go after all
unranked items.
lighting
:class:`plotly.graph_objects.surface.Lighting`
instance or dict with compatible properties
lightposition
:class:`plotly.graph_objects.surface.Lightposit
ion` instance or dict with compatible
properties
meta
Assigns extra meta information associated with
this trace that can be used in various text
attributes. Attributes such as trace `name`,
graph, axis and colorbar `title.text`,
annotation `text` `rangeselector`,
`updatemenues` and `sliders` `label` text all
support `meta`. To access the trace `meta`
values in an attribute in the same trace,
simply use `%{meta[i]}` where `i` is the index
or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or
key of the `meta` and `n` is the trace index.
metasrc
Sets the source reference on Chart Studio Cloud
for `meta`.
name
Sets the trace name. The trace name appear as
the legend item and on hover.
opacity
Sets the opacity of the surface. Please note
that in the case of using high `opacity` values
for example a value greater than or equal to
0.5 on two surfaces (and 0.25 with four
surfaces), an overlay of multiple transparent
surfaces may not perfectly be sorted in depth
by the webgl API. This behavior may be improved
in the near future and is subject to change.
opacityscale
Sets the opacityscale. The opacityscale must be
an array containing arrays mapping a normalized
value to an opacity value. At minimum, a
mapping for the lowest (0) and highest (1)
values are required. For example, `[[0, 1],
[0.5, 0.2], [1, 1]]` means that higher/lower
values would have higher opacity values and
those in the middle would be more transparent
Alternatively, `opacityscale` may be a palette
name string of the following list: 'min',
'max', 'extremes' and 'uniform'. The default is
'uniform'.
reversescale
Reverses the color mapping if true. If true,
`cmin` will correspond to the last color in the
array and `cmax` will correspond to the first
color.
scene
Sets a reference between this trace's 3D
coordinate system and a 3D scene. If "scene"
(the default value), the (x,y,z) coordinates
refer to `layout.scene`. If "scene2", the
(x,y,z) coordinates refer to `layout.scene2`,
and so on.
showlegend
Determines whether or not an item corresponding
to this trace is shown in the legend.
showscale
Determines whether or not a colorbar is
displayed for this trace.
stream
:class:`plotly.graph_objects.surface.Stream`
instance or dict with compatible properties
surfacecolor
Sets the surface color values, used for setting
a color scale independent of `z`.
surfacecolorsrc
Sets the source reference on Chart Studio Cloud
for `surfacecolor`.
text
Sets the text elements associated with each z
value. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements
will be seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud
for `text`.
uid
Assign an id to this trace, Use this to provide
object constancy between traces during
animations and transitions.
uirevision
Controls persistence of some user-driven
changes to the trace: `constraintrange` in
`parcoords` traces, as well as some `editable:
true` modifications such as `name` and
`colorbar.title`. Defaults to
`layout.uirevision`. Note that other user-
driven trace attribute changes are controlled
by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and
`colorbar.(x|y)` (accessible with `config:
{editable: true}`) is controlled by
`layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on
trace index if no `uid` is provided. So if your
app can add/remove traces before the end of the
`data` array, such that the same trace has a
different index, you can still preserve user-
driven changes if you give each trace a `uid`
that stays with it as it moves.
visible
Determines whether or not this trace is
visible. If "legendonly", the trace is not
drawn, but can appear as a legend item
(provided that the legend itself is visible).
x
Sets the x coordinates.
xcalendar
Sets the calendar system to use with `x` date
data.
xhoverformat
Sets the hover text formatting rulefor `x`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `xaxis.hoverformat`.
xsrc
Sets the source reference on Chart Studio Cloud
for `x`.
y
Sets the y coordinates.
ycalendar
Sets the calendar system to use with `y` date
data.
yhoverformat
Sets the hover text formatting rulefor `y`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `yaxis.hoverformat`.
ysrc
Sets the source reference on Chart Studio Cloud
for `y`.
z
Sets the z coordinates.
zcalendar
Sets the calendar system to use with `z` date
data.
zhoverformat
Sets the hover text formatting rulefor `z`
using d3 formatting mini-languages which are
very similar to those in Python. For numbers,
see: https://github.com/d3/d3-format/tree/v1.4.
5#d3-format. And for dates see:
https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
*09~15~23.46*By default the values are
formatted using `zaxis.hoverformat`.
zsrc
Sets the source reference on Chart Studio Cloud
for `z`.
""",
),
**kwargs,
)
| true | true |
f7312fc89ccc2c815c810940a586f246776c1fe6 | 3,700 | py | Python | tests/test_mix_mpi.py | acamero/MIP-EGO | c566bce1e12baba6bbd63bab8ade7f2ac7d08bea | [
"MIT"
] | null | null | null | tests/test_mix_mpi.py | acamero/MIP-EGO | c566bce1e12baba6bbd63bab8ade7f2ac7d08bea | [
"MIT"
] | null | null | null | tests/test_mix_mpi.py | acamero/MIP-EGO | c566bce1e12baba6bbd63bab8ade7f2ac7d08bea | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 15:57:47 2017
@author: wangronin
"""
import pdb
import os
import pandas as pd
from mpi4py import MPI
import numpy as np
from deap import benchmarks
from mipego import mipego
from mipego.surrogate import RrandomForest, RandomForest
from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
runs = comm.Get_size()
def obj_func(x):
    """Mixed-variable test objective: two reals, one integer, one nominal.

    The two real entries contribute a sphere term, the integer entry a
    scaled distance from 10, and the nominal entry a flat penalty of 2
    unless it equals 'OK'.
    """
    real_part = np.array(x[:2])
    int_part, nominal_part = x[2], x[3]
    penalty = 0 if nominal_part == 'OK' else 1
    return np.sum(real_part ** 2.) + abs(int_part - 10) / 123. + penalty * 2.
def create_optimizer(dim, fitness, n_step, n_init_sample, model_type):
    """Build a mipego optimizer over a mixed continuous/ordinal/nominal space.

    Args:
        dim (int): problem dimensionality, used to size the surrogate model.
        fitness (callable): objective function to be minimized.
        n_step (int): maximum number of optimization iterations.
        n_init_sample (int): size of the initial design.
        model_type (str): surrogate model family, 'sklearn-RF' or 'R-RF'.

    Returns:
        mipego: a configured optimizer instance.

    Raises:
        NotImplementedError: if model_type is 'GP' (the GP surrogate was a
            commented-out placeholder and never implemented).
        ValueError: for any other unrecognized model_type. Previously these
            cases fell through and crashed later with a NameError on `model`.
    """
    C = ContinuousSpace([-5, 5]) * 2
    I = OrdinalSpace([-100, 100])
    N = NominalSpace(['OK', 'A', 'B', 'C', 'D', 'E'])
    search_space = C * I * N
    levels = search_space.levels

    if model_type == 'GP':
        raise NotImplementedError("the 'GP' surrogate model is not implemented")
    elif model_type == 'sklearn-RF':
        # forest hyper-parameters scaled heuristically with design size / dim
        min_samples_leaf = max(1, int(n_init_sample / 20.))
        max_features = int(np.ceil(dim * 5 / 6.))
        model = RandomForest(levels=levels, n_estimators=100,
                             max_features=max_features,
                             min_samples_leaf=min_samples_leaf)
    elif model_type == 'R-RF':
        min_samples_leaf = max(1, int(n_init_sample / 20.))
        max_features = int(np.ceil(dim * 5 / 6.))
        model = RrandomForest(levels=levels, n_estimators=100,
                              max_features=max_features,
                              min_samples_leaf=min_samples_leaf)
    else:
        raise ValueError("unknown model_type: {}".format(model_type))

    opt = mipego(search_space, fitness, model, max_iter=n_step, random_seed=None,
                 n_init_sample=n_init_sample, minimize=True, optimizer='MIES')
    return opt
# Experiment configuration.
dim = 4
n_step = 20
n_init_sample = 30
model_type = 'R-RF'
functions = {"mine": obj_func}
# generate, distribute and set the random seeds for reproducibility
# (rank 0 draws one seed per MPI rank and scatters them)
if rank == 0:
    np.random.seed(1)
    seed = np.random.randint(0, 65535, runs)
    if not os.path.exists('./data'):
        os.makedirs('./data')
else:
    seed = None
seed = comm.scatter(seed, root=0)
np.random.seed(seed)
# NOTE(review): this script is Python 2 only (`print` statements,
# dict.iteritems); it will not run under Python 3 as written.
for func_name, func in functions.iteritems():
    if rank == 0:
        print "testing on function:", func_name, "dim:", dim
    # pre-allocated but never written to below — presumably a leftover
    y_hist_best = np.zeros((n_step, runs))
    csv_name = './data/{}D-{}N-{}.csv'.format(dim, n_init_sample, func_name)
    # each MPI rank runs one independent optimization
    opt = create_optimizer(dim, func, n_step, n_init_sample, model_type)
    opt.run()
    hist_perf = opt.hist_perf
    comm.Barrier()
    # gather every rank's performance history on rank 0
    __ = comm.gather(hist_perf, root=0)
    if rank == 0:
        data = np.atleast_2d(__)
        # ensure one column per run (gather may come back transposed)
        data = data.T if data.shape[1] != runs else data
        mean_ = np.mean(data, axis=1)
        # standard error of the mean across runs
        error_ = np.std(data, axis=1, ddof=1) / np.sqrt(runs)
        print 'mean : ', mean_
        print 'std error: ', error_
        # NOTE(review): comment said "append" but mode='w' overwrites the CSV;
        # also the first DataFrame assignment below is immediately discarded.
        df = pd.DataFrame(data)
        df = pd.DataFrame(data, columns=['run{}'.format(_+1) for _ in range(runs)])
df.to_csv(csv_name, mode='w', header=True, index=False) | 31.623932 | 83 | 0.578649 |
"""
Created on Fri Aug 4 15:57:47 2017
@author: wangronin
"""
import pdb
import os
import pandas as pd
from mpi4py import MPI
import numpy as np
from deap import benchmarks
from mipego import mipego
from mipego.surrogate import RrandomForest, RandomForest
from mipego.SearchSpace import ContinuousSpace, NominalSpace, OrdinalSpace
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
runs = comm.Get_size()
def obj_func(x):
x_r, x_i, x_d = np.array(x[:2]), x[2], x[3]
if x_d == 'OK':
tmp = 0
else:
tmp = 1
return np.sum(x_r ** 2.) + abs(x_i - 10) / 123. + tmp * 2.
def create_optimizer(dim, fitness, n_step, n_init_sample, model_type):
C = ContinuousSpace([-5, 5]) * 2
I = OrdinalSpace([-100, 100])
N = NominalSpace(['OK', 'A', 'B', 'C', 'D', 'E'])
search_space = C * I * N
levels = search_space.levels
if model_type == 'GP':
pass
elif model_type == 'sklearn-RF':
min_samples_leaf = max(1, int(n_init_sample / 20.))
max_features = int(np.ceil(dim * 5 / 6.))
model = RandomForest(levels=levels, n_estimators=100,
max_features=max_features,
min_samples_leaf=min_samples_leaf)
elif model_type == 'R-RF':
min_samples_leaf = max(1, int(n_init_sample / 20.))
max_features = int(np.ceil(dim * 5 / 6.))
model = RrandomForest(levels=levels, n_estimators=100,
max_features=max_features,
min_samples_leaf=min_samples_leaf)
opt = mipego(search_space, fitness, model, max_iter=n_step, random_seed=None,
n_init_sample=n_init_sample, minimize=True, optimizer='MIES')
return opt
dim = 4
n_step = 20
n_init_sample = 30
model_type = 'R-RF'
functions = {"mine": obj_func}
if rank == 0:
np.random.seed(1)
seed = np.random.randint(0, 65535, runs)
if not os.path.exists('./data'):
os.makedirs('./data')
else:
seed = None
seed = comm.scatter(seed, root=0)
np.random.seed(seed)
for func_name, func in functions.iteritems():
if rank == 0:
print "testing on function:", func_name, "dim:", dim
y_hist_best = np.zeros((n_step, runs))
csv_name = './data/{}D-{}N-{}.csv'.format(dim, n_init_sample, func_name)
opt = create_optimizer(dim, func, n_step, n_init_sample, model_type)
opt.run()
hist_perf = opt.hist_perf
comm.Barrier()
__ = comm.gather(hist_perf, root=0)
if rank == 0:
data = np.atleast_2d(__)
data = data.T if data.shape[1] != runs else data
mean_ = np.mean(data, axis=1)
error_ = np.std(data, axis=1, ddof=1) / np.sqrt(runs)
print 'mean : ', mean_
print 'std error: ', error_
df = pd.DataFrame(data)
df = pd.DataFrame(data, columns=['run{}'.format(_+1) for _ in range(runs)])
df.to_csv(csv_name, mode='w', header=True, index=False) | false | true |
f731303f5f0120e8733ff5fb197045d0cce54435 | 34 | py | Python | env/lib/python3.9/site-packages/spline/__init__.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 30 | 2017-12-05T11:12:06.000Z | 2021-11-06T18:27:58.000Z | env/lib/python3.9/site-packages/spline/__init__.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 112 | 2017-10-15T12:13:38.000Z | 2021-01-12T22:29:58.000Z | env/lib/python3.9/site-packages/spline/__init__.py | AdnanKhan27/nicstestbed | d3136e23fda8bd09706eb55d9a8c44ff0ad90730 | [
"MIT"
] | 6 | 2018-08-12T17:01:52.000Z | 2021-08-17T06:05:24.000Z | """Package tool."""
VERSION = 0.1
| 11.333333 | 19 | 0.588235 | VERSION = 0.1
| true | true |
f73130757b4a0554d8ee6445c089aeb67d3e3931 | 4,942 | py | Python | tensor_rl/agents/bandits/LinUCBAgentClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | tensor_rl/agents/bandits/LinUCBAgentClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | tensor_rl/agents/bandits/LinUCBAgentClass.py | umd-huang-lab/reinforcement-learning-via-spectral-methods | c7bd04d7eea6869807ed70af76960dcc542b0a82 | [
"MIT"
] | null | null | null | '''
Basic LinUCB implementation.
'''
# Python imports.
import numpy as np
from collections import defaultdict
# Other imports.
from tensor_rl.agents.AgentClass import Agent
class LinUCBAgent(Agent):
    '''
    Disjoint-model LinUCB contextual bandit agent: one linear model per arm,
    with an upper-confidence exploration bonus scaled by alpha.

    From:
        Lihong Li, et al. "A Contextual-Bandit Approach to Personalized
        News Article Recommendation." In Proceedings of the 19th
        International Conference on World Wide Web (WWW), 2010.
    '''

    def __init__(self, actions, name="LinUCB", rand_init=True, context_size=1, alpha=1.5):
        '''
        Args:
            actions (list): Contains a string for each action.
            name (str)
            rand_init (bool): If True, each arm's weight vector theta is
                initialized randomly instead of at zero.
            context_size (int): Dimension of the per-action context vector.
            alpha (float): Uncertainty parameter scaling the exploration bonus.
        '''
        Agent.__init__(self, name, actions)
        self.alpha = alpha
        self.context_size = context_size
        self.prev_context = None
        self.step_number = 0
        self.rand_init = rand_init
        self._init_action_model(rand_init)

    def get_parameters(self):
        '''
        Returns:
            (dict) key=param_name (str) --> val=param_val (object).
        '''
        param_dict = defaultdict(int)
        param_dict["rand_init"] = self.rand_init
        param_dict["context_size"] = self.context_size
        param_dict["alpha"] = self.alpha
        return param_dict

    def _init_action_model(self, rand_init=True):
        '''
        Summary:
            Initializes per-arm model parameters: design matrix A ('act'),
            its inverse ('act_inv'), weight vector 'theta' and reward
            accumulator 'b', one entry per action id.
        '''
        self.model = {'act': {}, 'act_inv': {}, 'theta': {}, 'b': {}}
        for action_id in range(len(self.actions)):
            self.model['act'][action_id] = np.identity(self.context_size)
            self.model['act_inv'][action_id] = np.identity(self.context_size)
            if rand_init:
                self.model['theta'][action_id] = np.random.random((self.context_size, 1))
            else:
                self.model['theta'][action_id] = np.zeros((self.context_size, 1))
            self.model['b'][action_id] = np.zeros((self.context_size, 1))

    def _compute_score(self, context):
        '''
        Args:
            context (list)

        Returns:
            (dict):
                K (int): action id
                V (float): UCB score = estimated reward + exploration bonus
        '''
        a_inv = self.model['act_inv']
        theta = self.model['theta']

        estimated_reward = {}
        uncertainty = {}
        score_dict = {}
        for action_id in range(len(self.actions)):
            action_context = np.reshape(context[action_id], (-1, 1))
            # theta^T x: point estimate of the reward for this arm
            estimated_reward[action_id] = float(theta[action_id].T.dot(action_context))
            # alpha * sqrt(x^T A^-1 x): confidence-width exploration bonus
            uncertainty[action_id] = float(self.alpha * np.sqrt(action_context.T.dot(a_inv[action_id]).dot(action_context)))
            score_dict[action_id] = estimated_reward[action_id] + uncertainty[action_id]

        return score_dict

    def update(self, reward):
        '''
        Args:
            reward (float)

        Summary:
            Updates self.model according to self.prev_context, self.prev_action, @reward:
            A += x x^T, b += r x, theta = A^-1 b for the previously chosen arm.
        '''
        action_id = self.actions.index(self.prev_action)
        action_context = np.reshape(self.prev_context[action_id], (-1, 1))
        self.model['act'][action_id] += action_context.dot(action_context.T)
        self.model['act_inv'][action_id] = np.linalg.inv(self.model['act'][action_id])
        self.model['b'][action_id] += reward * action_context
        self.model['theta'][action_id] = self.model['act_inv'][action_id].dot(self.model['b'][action_id])

    def act(self, context, reward):
        '''
        Args:
            context (iterable)
            reward (float): reward for the previous action (ignored on the
                first call, when no action has been taken yet).

        Returns:
            (str): action with the highest UCB score.
        '''
        # Update the model with the outcome of the previous context-action pair.
        if self.prev_action is not None:
            self.update(reward)

        # Compute UCB scores for all arms under the current context.
        context = self._pre_process_context(context)
        score = self._compute_score(context)

        # Pick the highest-scoring arm (random arm as a fallback seed value).
        best_action = np.random.choice(self.actions)
        max_score = float("-inf")
        for action_id in range(len(self.actions)):
            if score[action_id] > max_score:
                max_score = score[action_id]
                best_action = self.actions[action_id]

        # Remember this step so the next call can credit the reward.
        self.prev_action = best_action
        self.prev_context = context
        self.step_number += 1

        return best_action

    def _pre_process_context(self, context):
        # Normalize the incoming context into a per-action mapping.
        if context.get_num_feats() == 1:
            # If there's no context (that is, we're just in a regular bandit).
            context = context.features()

        if not hasattr(context[0], '__iter__'):
            # Single shared context: replicate it for every action id.
            new_context = {}
            for action_id in range(len(self.actions)):
                new_context[action_id] = context
            context = new_context

        return context
| 32.300654 | 124 | 0.581951 |
import numpy as np
from collections import defaultdict
from tensor_rl.agents.AgentClass import Agent
class LinUCBAgent(Agent):
def __init__(self, actions, name="LinUCB", rand_init=True, context_size=1, alpha=1.5):
Agent.__init__(self, name, actions)
self.alpha = alpha
self.context_size = context_size
self.prev_context = None
self.step_number = 0
self.rand_init = rand_init
self._init_action_model(rand_init)
def get_parameters(self):
param_dict = defaultdict(int)
param_dict["rand_init"] = self.rand_init
param_dict["context_size"] = self.context_size
param_dict["alpha"] = self.alpha
return param_dict
def _init_action_model(self, rand_init=True):
self.model = {'act': {}, 'act_inv': {}, 'theta': {}, 'b': {}}
for action_id in range(len(self.actions)):
self.model['act'][action_id] = np.identity(self.context_size)
self.model['act_inv'][action_id] = np.identity(self.context_size)
if rand_init:
self.model['theta'][action_id] = np.random.random((self.context_size, 1))
else:
self.model['theta'][action_id] = np.zeros((self.context_size, 1))
self.model['b'][action_id] = np.zeros((self.context_size,1))
def _compute_score(self, context):
a_inv = self.model['act_inv']
theta = self.model['theta']
estimated_reward = {}
uncertainty = {}
score_dict = {}
max_score = 0
for action_id in range(len(self.actions)):
action_context = np.reshape(context[action_id], (-1, 1))
estimated_reward[action_id] = float(theta[action_id].T.dot(action_context))
uncertainty[action_id] = float(self.alpha * np.sqrt(action_context.T.dot(a_inv[action_id]).dot(action_context)))
score_dict[action_id] = estimated_reward[action_id] + uncertainty[action_id]
return score_dict
def update(self, reward):
action_id = self.actions.index(self.prev_action)
action_context = np.reshape(self.prev_context[action_id], (-1, 1))
self.model['act'][action_id] += action_context.dot(action_context.T)
self.model['act_inv'][action_id] = np.linalg.inv(self.model['act'][action_id])
self.model['b'][action_id] += reward * action_context
self.model['theta'][action_id] = self.model['act_inv'][action_id].dot(self.model['b'][action_id])
def act(self, context, reward):
if self.prev_action is not None:
self.update(reward)
context = self._pre_process_context(context)
score = self._compute_score(context)
best_action = np.random.choice(self.actions)
max_score = float("-inf")
for action_id in range(len(self.actions)):
if score[action_id] > max_score:
max_score = score[action_id]
best_action = self.actions[action_id]
self.prev_action = best_action
self.prev_context = context
self.step_number += 1
return best_action
def _pre_process_context(self, context):
if context.get_num_feats() == 1:
context = context.features()
if not hasattr(context[0], '__iter__'):
new_context = {}
for action_id in range(len(self.actions)):
new_context[action_id] = context
context = new_context
return context
| true | true |
f731324a891f6c3b18ab949c169e91cd701cd440 | 4,071 | py | Python | tests/ast_parser/analyzers/test_module_analyzer.py | FredHappyface/handsdown | 097cfd5addbed22ba8ab21d4657da24459b09667 | [
"MIT"
] | 47 | 2019-10-18T13:59:20.000Z | 2022-03-21T21:46:30.000Z | tests/ast_parser/analyzers/test_module_analyzer.py | FredHappyface/handsdown | 097cfd5addbed22ba8ab21d4657da24459b09667 | [
"MIT"
] | 15 | 2019-10-24T13:42:02.000Z | 2022-03-22T19:25:49.000Z | tests/ast_parser/analyzers/test_module_analyzer.py | FredHappyface/handsdown | 097cfd5addbed22ba8ab21d4657da24459b09667 | [
"MIT"
] | 7 | 2019-11-22T12:24:57.000Z | 2022-01-29T13:18:51.000Z | # pylint: disable=missing-docstring
import unittest
from unittest.mock import MagicMock
import handsdown.ast_parser.smart_ast as ast
from handsdown.ast_parser.analyzers.module_analyzer import ModuleAnalyzer
class TestModuleAnalyzer(unittest.TestCase):
    """Unit tests for ModuleAnalyzer's AST visitor callbacks."""

    def test_init(self):
        """A freshly constructed analyzer has every collection empty."""
        fresh = ModuleAnalyzer()
        self.assertEqual(fresh.all_names, [])
        self.assertEqual(fresh.import_nodes, [])
        self.assertEqual(fresh.function_nodes, [])
        self.assertEqual(fresh.attribute_nodes, [])
        self.assertEqual(fresh.class_nodes, [])

    def test_visit_Import(self):
        """visit_Import records the node in import_nodes."""
        analyzer = ModuleAnalyzer()
        fake_node = "import_node"
        self.assertIsNone(analyzer.visit_Import(fake_node))
        self.assertEqual(len(analyzer.import_nodes), 1)
        self.assertEqual(analyzer.import_nodes[0], fake_node)

    def test_visit_ImportFrom(self):
        """visit_ImportFrom records the node in import_nodes."""
        analyzer = ModuleAnalyzer()
        fake_node = "import_from_node"
        self.assertIsNone(analyzer.visit_ImportFrom(fake_node))
        self.assertEqual(len(analyzer.import_nodes), 1)
        self.assertEqual(analyzer.import_nodes[0], "import_from_node")

    def test_visit_ClassDef(self):
        """Public classes are collected; underscore-prefixed ones are skipped."""
        analyzer = ModuleAnalyzer()
        class_node = MagicMock()
        class_node.name = "MyClass"
        self.assertIsNone(analyzer.visit_ClassDef(class_node))
        self.assertEqual(len(analyzer.class_nodes), 1)
        self.assertEqual(analyzer.class_nodes[0], class_node)

        class_node.name = "_PrivateClass"
        self.assertIsNone(analyzer.visit_ClassDef(class_node))
        self.assertEqual(len(analyzer.class_nodes), 1)

    def test_visit_FunctionDef(self):
        """Public functions are collected; private ones are skipped."""
        analyzer = ModuleAnalyzer()
        func_node = MagicMock()
        func_node.name = "my_func"
        self.assertIsNone(analyzer.visit_FunctionDef(func_node))
        self.assertEqual(len(analyzer.function_nodes), 1)
        self.assertEqual(analyzer.function_nodes[0], func_node)

        func_node.name = "_private_func"
        self.assertIsNone(analyzer.visit_FunctionDef(func_node))
        self.assertEqual(len(analyzer.function_nodes), 1)

    def test_visit_AsyncFunctionDef(self):
        """Async functions follow the same public/private rule as sync ones."""
        analyzer = ModuleAnalyzer()
        func_node = MagicMock()
        func_node.name = "my_func"
        self.assertIsNone(analyzer.visit_AsyncFunctionDef(func_node))
        self.assertEqual(len(analyzer.function_nodes), 1)
        self.assertEqual(analyzer.function_nodes[0], func_node)

        func_node.name = "_private_func"
        self.assertIsNone(analyzer.visit_AsyncFunctionDef(func_node))
        self.assertEqual(len(analyzer.function_nodes), 1)

    def test_visit_Assign(self):
        """Assignments are collected once per node for public single-name
        targets, and an __all__ list populates all_names from its strings."""
        analyzer = ModuleAnalyzer()
        assign_node = MagicMock()
        assign_node.mock_add_spec(ast.Assign)
        assign_node.value = "value"
        name_target = MagicMock()
        name_target.mock_add_spec(ast.Name)
        name_target.id = "attr"

        # single public name target: recorded exactly once
        assign_node.targets = [name_target]
        self.assertIsNone(analyzer.visit_Assign(assign_node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)
        self.assertEqual(analyzer.attribute_nodes[0], assign_node)

        # multiple targets: not recorded again
        assign_node.targets = [name_target, name_target]
        self.assertIsNone(analyzer.visit_Assign(assign_node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)

        # non-Name target: ignored
        assign_node.targets = ["not_name_target"]
        self.assertIsNone(analyzer.visit_Assign(assign_node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)

        # private name: ignored
        name_target.id = "_private_attr"
        assign_node.targets = [name_target]
        self.assertIsNone(analyzer.visit_Assign(assign_node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)

        # __all__ with str and bytes entries fills all_names; non-strings skipped
        name_target.id = "__all__"
        assign_node.targets = [name_target]
        str_entry = MagicMock()
        str_entry.mock_add_spec(ast.Str)
        str_entry.s = "MyClass"
        bytes_entry = MagicMock()
        bytes_entry.mock_add_spec(ast.Str)
        bytes_entry.s = b"my_func"
        list_value = MagicMock()
        list_value.mock_add_spec(ast.List)
        list_value.elts = [str_entry, bytes_entry, "not_name"]
        assign_node.value = list_value
        self.assertIsNone(analyzer.visit_Assign(assign_node))
        self.assertEqual(len(analyzer.attribute_nodes), 1)
        self.assertEqual(analyzer.all_names, ["MyClass", "my_func"])
import unittest
from unittest.mock import MagicMock
import handsdown.ast_parser.smart_ast as ast
from handsdown.ast_parser.analyzers.module_analyzer import ModuleAnalyzer
class TestModuleAnalyzer(unittest.TestCase):
def test_init(self):
analyzer = ModuleAnalyzer()
self.assertEqual(analyzer.all_names, [])
self.assertEqual(analyzer.import_nodes, [])
self.assertEqual(analyzer.function_nodes, [])
self.assertEqual(analyzer.attribute_nodes, [])
self.assertEqual(analyzer.class_nodes, [])
def test_visit_Import(self):
analyzer = ModuleAnalyzer()
node = "import_node"
self.assertIsNone(analyzer.visit_Import(node))
self.assertEqual(len(analyzer.import_nodes), 1)
self.assertEqual(analyzer.import_nodes[0], node)
def test_visit_ImportFrom(self):
analyzer = ModuleAnalyzer()
node = "import_from_node"
self.assertIsNone(analyzer.visit_ImportFrom(node))
self.assertEqual(len(analyzer.import_nodes), 1)
self.assertEqual(analyzer.import_nodes[0], "import_from_node")
def test_visit_ClassDef(self):
analyzer = ModuleAnalyzer()
node = MagicMock()
node.name = "MyClass"
self.assertIsNone(analyzer.visit_ClassDef(node))
self.assertEqual(len(analyzer.class_nodes), 1)
self.assertEqual(analyzer.class_nodes[0], node)
node.name = "_PrivateClass"
self.assertIsNone(analyzer.visit_ClassDef(node))
self.assertEqual(len(analyzer.class_nodes), 1)
def test_visit_FunctionDef(self):
analyzer = ModuleAnalyzer()
node = MagicMock()
node.name = "my_func"
self.assertIsNone(analyzer.visit_FunctionDef(node))
self.assertEqual(len(analyzer.function_nodes), 1)
self.assertEqual(analyzer.function_nodes[0], node)
node.name = "_private_func"
self.assertIsNone(analyzer.visit_FunctionDef(node))
self.assertEqual(len(analyzer.function_nodes), 1)
def test_visit_AsyncFunctionDef(self):
analyzer = ModuleAnalyzer()
node = MagicMock()
node.name = "my_func"
self.assertIsNone(analyzer.visit_AsyncFunctionDef(node))
self.assertEqual(len(analyzer.function_nodes), 1)
self.assertEqual(analyzer.function_nodes[0], node)
node.name = "_private_func"
self.assertIsNone(analyzer.visit_AsyncFunctionDef(node))
self.assertEqual(len(analyzer.function_nodes), 1)
def test_visit_Assign(self):
analyzer = ModuleAnalyzer()
node = MagicMock()
node.mock_add_spec(ast.Assign)
node.value = "value"
target = MagicMock()
target.mock_add_spec(ast.Name)
target.id = "attr"
node.targets = [target]
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
self.assertEqual(analyzer.attribute_nodes[0], node)
node.targets = [target, target]
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
node.targets = ["not_name_target"]
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
target.id = "_private_attr"
node.targets = [target]
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
target.id = "__all__"
node.targets = [target]
name_1 = MagicMock()
name_1.mock_add_spec(ast.Str)
name_1.s = "MyClass"
name_2 = MagicMock()
name_2.mock_add_spec(ast.Str)
name_2.s = b"my_func"
value = MagicMock()
value.mock_add_spec(ast.List)
value.elts = [name_1, name_2, "not_name"]
node.value = value
self.assertIsNone(analyzer.visit_Assign(node))
self.assertEqual(len(analyzer.attribute_nodes), 1)
self.assertEqual(analyzer.all_names, ["MyClass", "my_func"])
| true | true |
f73132b3321a9a9b53f0f68a1273c7a905986331 | 18,983 | py | Python | test/generic/pointers/test_pointer_tensor.py | harshkasyap/PySyft | 4575a50f38b78728dafe2615aad9145dae17b085 | [
"Apache-2.0"
] | null | null | null | test/generic/pointers/test_pointer_tensor.py | harshkasyap/PySyft | 4575a50f38b78728dafe2615aad9145dae17b085 | [
"Apache-2.0"
] | null | null | null | test/generic/pointers/test_pointer_tensor.py | harshkasyap/PySyft | 4575a50f38b78728dafe2615aad9145dae17b085 | [
"Apache-2.0"
] | null | null | null | import torch
import torch as th
import syft
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.generic.pointers.pointer_tensor import PointerTensor
import pytest
def test_init(workers):
    """A PointerTensor can be constructed directly and rendered as a string."""
    location, owner = workers["alice"], workers["me"]
    ptr = PointerTensor(id=1000, location=location, owner=owner)
    str(ptr)
def test_create_pointer():
    """create_pointer on a plain local tensor should not raise."""
    torch.Tensor([1, 2]).create_pointer()
def test_send_default_garbage_collector_true(workers):
    """
    By default, deleting a pointer tensor garbage-collects the
    remote tensor it points to.
    """
    alice = workers["alice"]
    remote = torch.Tensor([-1, 2]).send(alice)
    assert remote.child.garbage_collect_data
def test_send_garbage_collect_data_false(workers):
    """
    Setting garbage_collection to False keeps the remote tensor alive
    after the pointer is deleted.
    """
    alice = workers["alice"]
    remote = torch.Tensor([-1, 2]).send(alice)
    remote.garbage_collection = False
    assert remote.child.garbage_collect_data is False
def test_send_gc_false(workers):
    """
    The .gc shorthand disables remote garbage collection and stays in
    sync with the garbage_collection property.
    """
    alice = workers["alice"]
    remote = torch.Tensor([-1, 2]).send(alice)
    remote.gc = False
    assert remote.child.garbage_collect_data is False
    assert remote.gc is False, "property GC is not in sync"
    assert remote.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_gc_true(workers):
    """A freshly sent tensor has garbage collection enabled by default."""
    alice = workers["alice"]
    remote = torch.Tensor([-1, 2]).send(alice)
    assert remote.gc
def test_send_disable_gc(workers):
    """.disable_gc chained off send switches off every GC flag."""
    alice = workers["alice"]
    remote = torch.Tensor([-1, 2]).send(alice).disable_gc
    assert remote.child.garbage_collect_data is False
    assert remote.gc is False, "property GC is not in sync"
    assert remote.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_get(workers):
    """Round-trips tensors through send/get in several configurations."""
    bob = workers["bob"]
    alice = workers["alice"]

    # plain round trip
    original = torch.Tensor([1, 2])
    remote = original.send(bob)
    assert (original == remote.get()).all()

    # round trip, rebinding the same name
    tensor = torch.Tensor([1, 2])
    tensor = tensor.send(bob)
    assert (torch.Tensor([1, 2]) == tensor.get()).all()

    # pointer-to-pointer round trip
    original = torch.Tensor([1, 2])
    remote = original.send(bob)
    double_remote = remote.send(alice)
    inner_ptr = double_remote.get()
    assert (original == inner_ptr.get()).all()

    # pointer-to-pointer round trip, rebinding the same name
    tensor = torch.Tensor([1, 2])
    tensor = tensor.send(bob)
    tensor = tensor.send(alice)
    tensor = tensor.get()
    assert (torch.Tensor([1, 2]) == tensor.get()).all()

    # chained double send and double get
    chained = torch.Tensor([1, 2]).send(bob).send(alice)
    assert (torch.Tensor([1, 2]) == chained.get().get()).all()
def test_inplace_send_get(workers):
    """In-place send_/get_ keep both the syft id and the python identity."""
    bob = workers["bob"]

    local = torch.tensor([1.0, -1.0, 3.0, 4.0])
    ptr = local.send_(bob)
    assert ptr.id == local.id
    assert id(ptr) == id(local)

    returned = ptr.get_()
    assert returned.id == ptr.id
    assert returned.id == local.id
    assert id(returned) == id(local)
    assert (returned == local).all()
def test_repeated_send(workers):
    """Tests that repeated calls to .send(bob) works gracefully.
    Previously garbage collection deleted the remote object
    when .send() was called twice. This test ensures the fix still
    works."""
    bob = workers["bob"]
    # create a local tensor
    x = torch.Tensor([1, 2])
    # send tensor to bob
    x_ptr = x.send(bob)
    # send again: rebinding x_ptr drops the first pointer, which used to
    # garbage-collect the remote copy out from under the new pointer
    x_ptr = x.send(bob)
    # the remote object must still exist under the tensor's id
    assert x.id in bob.object_store._objects
def test_remote_autograd(workers):
    """Tests the ability to backpropagate gradients on a remote
    worker."""
    bob = workers["bob"]
    # TEST 1: simple remote grad calculation
    # create a tensor that tracks gradients
    x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
    # send tensor to bob
    x = x.send(bob)
    # y = sum(2x), so dy/dx = 2 for every element
    y = (x + x).sum()
    # backpropagate on remote machine
    y.backward()
    # check the gradient stored on bob's copy: all twos
    x_grad = bob.object_store.get_obj(x.id_at_location).grad
    x_grad_target = torch.ones(4).float() + 1
    assert (x_grad == x_grad_target).all()
    # TEST 2: ensure the remotely computed grad gets properly serialized
    # create tensor and send in one step
    x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)
    # compute function
    y = x.sum()
    # backpropagate
    y.backward()
    # fetch the gradient directly from bob's object store
    x_grad = bob.object_store.get_obj(x.id_at_location).grad
    # get the entire x tensor back (should bring the grad too)
    x = x.get()
    # the grad that travelled with x must match the remote one
    assert (x.grad == x_grad).all()
def test_gradient_send_recv(workers):
    """A tensor's .grad survives a send/get round trip intact."""
    bob = workers["bob"]

    local = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
    # populate .grad via a backward pass
    local.sum().backward(th.tensor(1.0))
    expected_grad = local.grad

    round_tripped = local.send(bob).get()
    assert (round_tripped.grad == expected_grad).all()
def test_method_on_attribute(workers):
    """Pointers can target an attribute of a remote wrapped tensor via
    point_to_attr, and .get() on such a pointer must be rejected."""
    bob = workers["bob"]

    # create remote object with children: LoggingTensor wrapping a torch.Tensor
    x = torch.Tensor([1, 2, 3])
    x = syft.LoggingTensor().on(x).send(bob)

    # call method on the inner data tensor directly
    x.child.point_to_attr = "child.child"
    y = x.add(x)
    assert isinstance(y.get(), torch.Tensor)

    # call method on the LoggingTensor layer directly
    x.child.point_to_attr = "child"
    y = x.add(x)
    y = y.get()
    assert isinstance(y.child, syft.LoggingTensor)

    # .get() on a pointer to an attribute must raise.
    # (The previous try/except with `assert True` passed silently when no
    # exception was raised at all.)
    x.child.point_to_attr = "child"
    with pytest.raises(syft.exceptions.CannotRequestObjectAttribute):
        x.get()
def test_grad_pointer(workers):
    """Sending a requires_grad tensor creates a usable remote .grad
    after a remote backward pass."""
    bob = workers["bob"]

    remote = torch.tensor([1, 2, 3.0], requires_grad=True).send(bob)
    loss = (remote + remote).sum()
    loss.backward()

    remote_grad = bob.object_store.get_obj(remote.id_at_location).grad
    assert (remote_grad == torch.tensor([2, 2, 2.0])).all()
def test_move(workers):
    """Exercise pointer.move() across workers, on remote pointers, back to
    the local worker, and onto the same location."""
    alice, bob, james, me = workers["alice"], workers["bob"], workers["james"], workers["me"]
    # plain move: the object leaves bob's store and appears in alice's
    x = torch.tensor([1, 2, 3, 4, 5]).send(bob)
    assert x.id_at_location in bob.object_store._objects
    assert x.id_at_location not in alice.object_store._objects
    p = x.move(alice)
    assert x.id_at_location not in bob.object_store._objects
    assert x.id_at_location in alice.object_store._objects
    # same, for a tensor that tracks gradients
    x = torch.tensor([1.0, 2, 3, 4, 5], requires_grad=True).send(bob)
    assert x.id_at_location in bob.object_store._objects
    assert x.id_at_location not in alice.object_store._objects
    p = x.move(alice)
    assert x.id_at_location not in bob.object_store._objects
    assert x.id_at_location in alice.object_store._objects
    # a move leaves exactly one tensor on the destination worker
    alice.clear_objects()
    bob.clear_objects()
    x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
    p = x.move(alice)
    assert len(alice.object_store._tensors) == 1
    # Test .move on remote objects (a pointer held by james, moved bob->alice)
    james.clear_objects()
    x = th.tensor([1.0]).send(james)
    remote_x = james.object_store.get_obj(x.id_at_location)
    remote_ptr = remote_x.send(bob)
    assert remote_ptr.id in james.object_store._objects.keys()
    remote_ptr2 = remote_ptr.move(alice)
    assert remote_ptr2.id in james.object_store._objects.keys()
    # Test .move back to myself: the data must round-trip unchanged
    alice.clear_objects()
    bob.clear_objects()
    t = torch.tensor([1.0, 2, 3, 4, 5])
    x = t.send(bob)
    y = x.move(alice)
    z = y.move(me)
    assert (z == t).all()
    # Move object to same location: effectively a no-op, data preserved
    alice.clear_objects()
    t = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
    t = t.move(bob)
    assert torch.all(torch.eq(t.get(), torch.tensor([1.0, 2, 3, 4, 5])))
def test_combine_pointers(workers):
    """
    Ensure that the sy.combine_pointers works as expected
    """
    bob = workers["bob"]
    alice = workers["alice"]

    x = th.tensor([1, 2, 3, 4, 5]).send(bob)
    y = th.tensor([1, 2, 3, 4, 5]).send(alice)

    a = x.combine(y)
    b = a + a

    # sum_results=True: one tensor equal to the element-wise sum
    # of the results retrieved from both workers.
    c = b.get(sum_results=True)
    assert (c == th.tensor([4, 8, 12, 16, 20])).all()

    # sum_results=False: one result tensor per worker.
    b = a + a
    c = b.get(sum_results=False)
    assert len(c) == 2
    # Bug fix: the original asserted `(...).all` — a bound method, which is
    # always truthy — so the comparison was never evaluated. Call it.
    assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all()
def test_remote_to_cpu_device(workers):
    """Calling .to(cpu) through a pointer must not raise."""
    bob = workers["bob"]

    cpu = torch.device("cpu")
    ptr = th.tensor([1, 2, 3, 4, 5]).send(bob)
    ptr.to(cpu)
def test_get_remote_shape(workers):
    """Pointer .shape works for sent tensors and for remote results."""
    bob = workers["bob"]

    # Shape of a directly-sent tensor is recorded at send time.
    sent = th.tensor([1, 2, 3, 4, 5]).send(bob)
    assert sent.shape == torch.Size([5])

    # Shape of a remote computation result requires asking the worker.
    result = sent + sent
    assert result.shape == torch.Size([5])
def test_get_remote_ndim(workers):
    """Pointer .ndim reflects the remote tensor's dimensionality."""
    bob = workers["bob"]

    ptr = th.rand(2, 3, 4).send(bob)
    assert ptr.ndim == 3
def test_remote_T(workers):
    """Pointer .T transposes remotely without altering the original."""
    bob = workers["bob"]

    local = th.rand(2, 3, 4)
    remote = local.send(bob)
    transposed = remote.T

    # Transposition reverses the shape; the source pointer is untouched.
    assert remote.shape == torch.Size([2, 3, 4])
    assert transposed.shape == torch.Size([4, 3, 2])
    assert (remote.get() == local).all()
    assert (transposed.get() == local.T).all()
def test_remote_function_with_multi_ouput(workers):
    """Remote functions that return several tensors (torch.split,
    torch.max) must register and return every output correctly."""
    bob = workers["bob"]

    # torch.split returns a tuple of chunk pointers.
    ptr = torch.tensor([1, 2, 3, 4.0]).send(bob)
    chunks = torch.split(ptr, 2)
    assert (chunks[0].get() == torch.tensor([1, 2.0])).all()

    # torch.max along a dim returns (values, indices).
    ptr = torch.tensor([1, 2, 3, 4.0]).send(bob)
    value, index = torch.max(ptr, 0)
    assert value.get().item() == 4.0
    assert index.get().item() == 3
def test_inplace_binary_method_with_non_pointers(workers):
    """An in-place op whose single argument is a plain local tensor is
    allowed to auto-send that tensor to the remote worker, easing reuse
    of third-party code for remote execution."""
    alice = workers["alice"]

    remote = th.tensor([1.0, 2]).send(alice)
    local = th.tensor([1.0, 1])
    remote += local

    assert (remote.get() == th.tensor([2.0, 3])).all()
def test_raising_error_when_item_func_called(workers):
    """Calling .item() on a bare pointer has no local value to return,
    so it must raise RuntimeError."""
    ptr = PointerTensor(id=1000, location=workers["alice"], owner=workers["me"])
    with pytest.raises(RuntimeError):
        ptr.item()
def test_fix_prec_on_pointer_tensor(workers):
    """fix_precision() through a pointer wraps the remote value in a
    FixedPrecisionTensor and is not performed in place."""
    bob = workers["bob"]

    tensor = torch.tensor([1, 2, 3, 4.0])
    ptr = tensor.send(bob)
    ptr_fp = ptr.fix_precision()

    remote_plain = bob.object_store.get_obj(ptr.id_at_location)
    remote_fixed = bob.object_store.get_obj(ptr_fp.id_at_location)

    # Not in place: the originally-sent remote tensor is unchanged.
    assert (remote_plain == tensor).all()
    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote_fixed.child, FixedPrecisionTensor)
def test_fix_prec_on_pointer_of_pointer(workers):
    """fix_precision() propagates down a chain of pointers to the
    worker that actually holds the data."""
    bob = workers["bob"]
    alice = workers["alice"]

    ptr = torch.tensor([1, 2, 3, 4.0]).send(bob).send(alice)
    ptr = ptr.fix_precision()

    # Follow the chain: me -> alice -> bob.
    on_alice = alice.object_store.get_obj(ptr.id_at_location)
    on_bob = bob.object_store.get_obj(on_alice.id_at_location)

    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(on_bob.child, FixedPrecisionTensor)
def test_float_prec_on_pointer_tensor(workers):
    """float_precision() undoes fix_precision() on a remote tensor,
    leaving a plain torch.Tensor on the worker."""
    bob = workers["bob"]

    ptr = torch.tensor([1, 2, 3, 4.0]).send(bob)
    ptr = ptr.fix_precision().float_precision()

    remote = bob.object_store.get_obj(ptr.id_at_location)
    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote, torch.Tensor)
def test_float_prec_on_pointer_of_pointer(workers):
    """float_precision() restores a plain tensor even through a chain
    of pointers."""
    bob = workers["bob"]
    alice = workers["alice"]

    ptr = torch.tensor([1, 2, 3, 4.0]).send(bob).send(alice)
    ptr = ptr.fix_precision()
    ptr = ptr.float_precision()

    # Follow the chain: me -> alice -> bob.
    on_alice = alice.object_store.get_obj(ptr.id_at_location)
    on_bob = bob.object_store.get_obj(on_alice.id_at_location)

    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(on_bob, torch.Tensor)
def test_share_get(workers):
    """.share() through a pointer secret-shares the remote value,
    producing an AdditiveSharingTensor on the holding worker."""
    bob = workers["bob"]
    alice = workers["alice"]
    charlie = workers["charlie"]

    ptr = torch.tensor([1, 2, 3]).send(bob)
    ptr = ptr.share(charlie, alice)

    remote = bob.object_store.get_obj(ptr.id_at_location)
    assert isinstance(ptr.child, PointerTensor)
    assert isinstance(remote.child, AdditiveSharingTensor)
def test_registration_of_action_on_pointer_of_pointer(workers):
    """An action executed through a pointer chain registers one new
    tensor on each worker in the chain."""
    bob = workers["bob"]
    alice = workers["alice"]

    chained = torch.tensor([1, 2, 3, 4.0]).send(bob).send(alice)
    result = chained + chained

    # Each worker holds the original tensor plus the action result.
    assert len(alice.object_store._tensors) == 2
    assert len(bob.object_store._tensors) == 2
def test_setting_back_grad_to_origin_after_send(workers):
    """backward() on a tensor sent with `.send(..., requires_grad=True)`
    must propagate the gradient back to the origin tensor."""
    me = workers["me"]
    alice = workers["alice"]

    with me.registration_enabled():
        x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
        intermediate = x + x
        # Registration on the local worker is sometimes buggy; force it.
        me.register_obj(intermediate)

        remote = intermediate.send(alice, requires_grad=True)
        loss = (remote * 2).sum()
        loss.backward()

        # d(sum(2 * (x + x)))/dx == 4 element-wise.
        assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_setting_back_grad_to_origin_after_move(workers):
    """backward() after `.move(..., requires_grad=True)` must propagate
    the gradient all the way back to the origin tensor."""
    me = workers["me"]
    bob = workers["bob"]
    alice = workers["alice"]

    with me.registration_enabled():
        x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
        intermediate = x + x
        # Registration on the local worker is sometimes buggy; force it.
        me.register_obj(intermediate)

        on_alice = intermediate.send(alice, requires_grad=True)
        doubled = on_alice * 2
        on_bob = doubled.move(bob, requires_grad=True)
        loss = on_bob.sum()
        loss.backward()

        # d(sum(2 * (x + x)))/dx == 4 element-wise.
        assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_remote_grad_fn(workers):
    """
    Test that grad_fn can be accessed remotely
    """
    alice = workers["alice"]

    # Reference: grad_fn type of the same computation performed locally.
    t = th.tensor([1.0, 1], requires_grad=True)
    p = t.sum()
    p.backward()
    expected_type = type(p.grad_fn)

    x = th.tensor([1.0, 1], requires_grad=True).send(alice)
    p = x.sum()
    p.backward()

    # The local grad_fn wrapper holds a pointer to the remote grad_fn.
    p_grad_fn = p.child.grad_fn.child
    assert isinstance(p_grad_fn, syft.PointerTensor)

    # Consistency: fetch via the object-store accessor like every other
    # test in this file, instead of indexing the private `_objects` map.
    remote_grad_fn = alice.object_store.get_obj(p_grad_fn.id_at_location)
    assert type(remote_grad_fn.grad_fn) == expected_type
def test_iadd(workers):
    """In-place addition between two pointers on the same worker keeps
    the remote object count stable (no stray temporaries)."""
    alice = workers["alice"]

    left = torch.ones(1, 5).send(alice)
    right = torch.ones(1, 5).send(alice)
    right += left

    assert len(alice.object_store._objects) == 2
def test_inplace_ops_on_remote_long_tensor(workers):
    """The in-place send_/get_ round-trip works for LongTensor operands."""
    alice = workers["alice"]

    tensor = torch.LongTensor([2])
    doubled = tensor.send_(alice) * 2
    doubled.get_()

    assert doubled == torch.LongTensor([4])
def test_iterable_pointer(workers):
    """Iterating a pointer yields per-row sub-pointers; the remote
    object count tracks how many of them are kept alive locally."""
    alice = workers["alice"]
    t = torch.Tensor([[1, 2], [4, 5], [7, 8]])
    p = t.send(alice)
    assert len(alice.object_store) == 1
    # Each iteration creates one temporary row pointer on alice...
    for idx, tensor in enumerate(p):
        assert len(alice.object_store) == 2
        assert isinstance(tensor, PointerTensor)
        assert torch.all(tensor.get() == t[idx])
    # ...which disappears once the loop variable is released.
    assert len(alice.object_store) == 1
    l = []
    for idx, tensor in enumerate(p):
        l.append(tensor)
    # Keeping all three row pointers alive keeps their remote copies alive.
    assert len(alice.object_store) == 4
    del l
    del tensor
    assert len(alice.object_store) == 1
    for idx, tensor in enumerate(p[:, 1]):
        # Should be 3 because p[:, 1] will create another tensor on alice side
        assert len(alice.object_store) == 3
        assert isinstance(tensor, PointerTensor)
        assert torch.all(tensor.get() == t[:, 1][idx])
def test_register_hook_on_remote_tensor_or_modules(workers):
    """register_hook on a remote tensor and register_backward_hook on a
    local module must both fire exactly once on backward()."""
    alice = workers["alice"]

    # we need to set a storage object on the local worker
    with syft.local_worker.registration_enabled():

        ## Tensor hook
        flag = []

        def hook_function(inputs, outputs):
            flag.append(True)  # pragma: no cover

        p = th.tensor([1.0, 2], requires_grad=True).send(alice)
        p.register_hook(hook_function)
        # Hook must not fire until backward() runs.
        assert len(flag) == 0
        p.sum().backward()
        assert len(flag) == 1

        ## Module hook
        flag = []

        def hook_function(model, inputs, outputs):
            flag.append(True)  # pragma: no cover

        x = th.tensor([1.0, 2])
        model = torch.nn.Linear(2, 1)
        model.register_backward_hook(hook_function)
        loss = model(x)
        # Forward pass alone must not trigger the backward hook.
        assert len(flag) == 0
        loss.backward()
        assert len(flag) == 1
| 26.661517 | 95 | 0.645788 | import torch
import torch as th
import syft
from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.generic.pointers.pointer_tensor import PointerTensor
import pytest
def test_init(workers):
alice, me = workers["alice"], workers["me"]
pointer = PointerTensor(id=1000, location=alice, owner=me)
pointer.__str__()
def test_create_pointer():
x = torch.Tensor([1, 2])
x.create_pointer()
def test_send_default_garbage_collector_true(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
assert x_ptr.child.garbage_collect_data
def test_send_garbage_collect_data_false(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
x_ptr.garbage_collection = False
assert x_ptr.child.garbage_collect_data is False
def test_send_gc_false(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
x_ptr.gc = False
assert x_ptr.child.garbage_collect_data is False
assert x_ptr.gc is False, "property GC is not in sync"
assert x_ptr.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_gc_true(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice)
assert x_ptr.gc
def test_send_disable_gc(workers):
alice = workers["alice"]
x = torch.Tensor([-1, 2])
x_ptr = x.send(alice).disable_gc
assert x_ptr.child.garbage_collect_data is False
assert x_ptr.gc is False, "property GC is not in sync"
assert x_ptr.garbage_collection is False, "property garbage_collection is not in sync"
def test_send_get(workers):
bob = workers["bob"]
alice = workers["alice"]
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_back = x_ptr.get()
assert (x == x_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob)
x_back = x.get()
assert (torch.Tensor([1, 2]) == x_back).all()
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_ptr_ptr = x_ptr.send(alice)
x_ptr_back = x_ptr_ptr.get()
x_back_back = x_ptr_back.get()
assert (x == x_back_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob)
x = x.send(alice)
x = x.get()
x_back = x.get()
assert (torch.Tensor([1, 2]) == x_back).all()
x = torch.Tensor([1, 2])
x = x.send(bob).send(alice)
x_back = x.get().get()
assert (torch.Tensor([1, 2]) == x_back).all()
def test_inplace_send_get(workers):
bob = workers["bob"]
tensor = torch.tensor([1.0, -1.0, 3.0, 4.0])
tensor_ptr = tensor.send_(bob)
assert tensor_ptr.id == tensor.id
assert id(tensor_ptr) == id(tensor)
tensor_back = tensor_ptr.get_()
assert tensor_back.id == tensor_ptr.id
assert tensor_back.id == tensor.id
assert id(tensor_back) == id(tensor)
assert id(tensor_back) == id(tensor)
assert (tensor_back == tensor).all()
def test_repeated_send(workers):
bob = workers["bob"]
x = torch.Tensor([1, 2])
x_ptr = x.send(bob)
x_ptr = x.send(bob)
assert x.id in bob.object_store._objects
def test_remote_autograd(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
x = x.send(bob)
y = (x + x).sum()
y.backward()
x_grad = bob.object_store.get_obj(x.id_at_location).grad
x_grad_target = torch.ones(4).float() + 1
assert (x_grad == x_grad_target).all()
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True).send(bob)
y = x.sum()
y.backward()
x_grad = bob.object_store.get_obj(x.id_at_location).grad
x = x.get()
assert (x.grad == x_grad).all()
def test_gradient_send_recv(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3, 4.0], requires_grad=True)
x.sum().backward(th.tensor(1.0))
orig_grad = x.grad
t = x.send(bob).get()
assert (t.grad == orig_grad).all()
def test_method_on_attribute(workers):
bob = workers["bob"]
x = torch.Tensor([1, 2, 3])
x = syft.LoggingTensor().on(x).send(bob)
x.child.point_to_attr = "child.child"
y = x.add(x)
assert isinstance(y.get(), torch.Tensor)
x.child.point_to_attr = "child"
y = x.add(x)
y = y.get()
assert isinstance(y.child, syft.LoggingTensor)
x.child.point_to_attr = "child"
try:
x.get()
except syft.exceptions.CannotRequestObjectAttribute as e:
assert True
def test_grad_pointer(workers):
bob = workers["bob"]
x = torch.tensor([1, 2, 3.0], requires_grad=True).send(bob)
y = (x + x).sum()
y.backward()
assert (bob.object_store.get_obj(x.id_at_location).grad == torch.tensor([2, 2, 2.0])).all()
def test_move(workers):
alice, bob, james, me = workers["alice"], workers["bob"], workers["james"], workers["me"]
x = torch.tensor([1, 2, 3, 4, 5]).send(bob)
assert x.id_at_location in bob.object_store._objects
assert x.id_at_location not in alice.object_store._objects
p = x.move(alice)
assert x.id_at_location not in bob.object_store._objects
assert x.id_at_location in alice.object_store._objects
x = torch.tensor([1.0, 2, 3, 4, 5], requires_grad=True).send(bob)
assert x.id_at_location in bob.object_store._objects
assert x.id_at_location not in alice.object_store._objects
p = x.move(alice)
assert x.id_at_location not in bob.object_store._objects
assert x.id_at_location in alice.object_store._objects
alice.clear_objects()
bob.clear_objects()
x = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
p = x.move(alice)
assert len(alice.object_store._tensors) == 1
james.clear_objects()
x = th.tensor([1.0]).send(james)
remote_x = james.object_store.get_obj(x.id_at_location)
remote_ptr = remote_x.send(bob)
assert remote_ptr.id in james.object_store._objects.keys()
remote_ptr2 = remote_ptr.move(alice)
assert remote_ptr2.id in james.object_store._objects.keys()
alice.clear_objects()
bob.clear_objects()
t = torch.tensor([1.0, 2, 3, 4, 5])
x = t.send(bob)
y = x.move(alice)
z = y.move(me)
assert (z == t).all()
alice.clear_objects()
t = torch.tensor([1.0, 2, 3, 4, 5]).send(bob)
t = t.move(bob)
assert torch.all(torch.eq(t.get(), torch.tensor([1.0, 2, 3, 4, 5])))
def test_combine_pointers(workers):
bob = workers["bob"]
alice = workers["alice"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
y = th.tensor([1, 2, 3, 4, 5]).send(alice)
a = x.combine(y)
b = a + a
c = b.get(sum_results=True)
assert (c == th.tensor([4, 8, 12, 16, 20])).all()
b = a + a
c = b.get(sum_results=False)
assert len(c) == 2
assert (c[0] == th.tensor([2, 4, 6, 8, 10])).all
def test_remote_to_cpu_device(workers):
device = torch.device("cpu")
bob = workers["bob"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
x.to(device)
def test_get_remote_shape(workers):
bob = workers["bob"]
x = th.tensor([1, 2, 3, 4, 5]).send(bob)
assert x.shape == torch.Size([5])
y = x + x
assert y.shape == torch.Size([5])
def test_get_remote_ndim(workers):
bob = workers["bob"]
x = th.rand(2, 3, 4).send(bob)
assert x.ndim == 3
def test_remote_T(workers):
bob = workers["bob"]
x = th.rand(2, 3, 4)
bob_x = x.send(bob)
bob_xT = bob_x.T
assert bob_x.shape == torch.Size([2, 3, 4])
assert bob_xT.shape == torch.Size([4, 3, 2])
assert (bob_x.get() == x).all()
assert (bob_xT.get() == x.T).all()
def test_remote_function_with_multi_ouput(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
r_ptr = torch.split(ptr, 2)
assert (r_ptr[0].get() == torch.tensor([1, 2.0])).all()
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
max_value, argmax_idx = torch.max(ptr, 0)
assert max_value.get().item() == 4.0
assert argmax_idx.get().item() == 3
def test_inplace_binary_method_with_non_pointers(workers):
alice = workers["alice"]
p = th.tensor([1.0, 2]).send(alice)
x = th.tensor([1.0, 1])
p += x
assert (p.get() == th.tensor([2.0, 3])).all()
def test_raising_error_when_item_func_called(workers):
pointer = PointerTensor(id=1000, location=workers["alice"], owner=workers["me"])
with pytest.raises(RuntimeError):
pointer.item()
def test_fix_prec_on_pointer_tensor(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr_fp = ptr.fix_precision()
remote_tensor = bob.object_store.get_obj(ptr.id_at_location)
remote_fp_tensor = bob.object_store.get_obj(ptr_fp.id_at_location)
assert (remote_tensor == tensor).all()
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_fp_tensor.child, FixedPrecisionTensor)
def test_fix_prec_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr = ptr.fix_precision()
alice_tensor = alice.object_store.get_obj(ptr.id_at_location)
remote_tensor = bob.object_store.get_obj(alice_tensor.id_at_location)
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor.child, FixedPrecisionTensor)
def test_float_prec_on_pointer_tensor(workers):
bob = workers["bob"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.fix_precision()
ptr = ptr.float_precision()
remote_tensor = bob.object_store.get_obj(ptr.id_at_location)
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor, torch.Tensor)
def test_float_prec_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr = ptr.fix_precision()
ptr = ptr.float_precision()
alice_tensor = alice.object_store.get_obj(ptr.id_at_location)
remote_tensor = bob.object_store.get_obj(alice_tensor.id_at_location)
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor, torch.Tensor)
def test_share_get(workers):
bob = workers["bob"]
alice = workers["alice"]
charlie = workers["charlie"]
tensor = torch.tensor([1, 2, 3])
ptr = tensor.send(bob)
ptr = ptr.share(charlie, alice)
remote_tensor = bob.object_store.get_obj(ptr.id_at_location)
assert isinstance(ptr.child, PointerTensor)
assert isinstance(remote_tensor.child, AdditiveSharingTensor)
def test_registration_of_action_on_pointer_of_pointer(workers):
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr_action = ptr + ptr
assert len(alice.object_store._tensors) == 2
assert len(bob.object_store._tensors) == 2
def test_setting_back_grad_to_origin_after_send(workers):
me = workers["me"]
alice = workers["alice"]
with me.registration_enabled():
x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
y = x + x
me.register_obj(y)
y_ptr = y.send(alice, requires_grad=True)
z_ptr = y_ptr * 2
z = z_ptr.sum()
z.backward()
assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_setting_back_grad_to_origin_after_move(workers):
me = workers["me"]
bob = workers["bob"]
alice = workers["alice"]
with me.registration_enabled():
x = th.tensor([1.0, 2.0, 3, 4, 5], requires_grad=True)
y = x + x
me.register_obj(y)
y_ptr = y.send(alice, requires_grad=True)
z_ptr = y_ptr * 2
z_ptr2 = z_ptr.move(bob, requires_grad=True)
z = z_ptr2.sum()
z.backward()
assert (x.grad == th.tensor([4.0, 4.0, 4.0, 4.0, 4.0])).all()
def test_remote_grad_fn(workers):
alice = workers["alice"]
t = th.tensor([1.0, 1], requires_grad=True)
p = t.sum()
p.backward()
expected_type = type(p.grad_fn)
x = th.tensor([1.0, 1], requires_grad=True).send(alice)
p = x.sum()
p.backward()
p_grad_fn = p.child.grad_fn.child
assert isinstance(p_grad_fn, syft.PointerTensor)
remote_grad_fn = alice._objects[p_grad_fn.id_at_location]
assert type(remote_grad_fn.grad_fn) == expected_type
def test_iadd(workers):
alice = workers["alice"]
a = torch.ones(1, 5)
b = torch.ones(1, 5)
a_pt = a.send(alice)
b_pt = b.send(alice)
b_pt += a_pt
assert len(alice.object_store._objects) == 2
def test_inplace_ops_on_remote_long_tensor(workers):
alice = workers["alice"]
t = torch.LongTensor([2])
p = t.send_(alice) * 2
p.get_()
assert p == torch.LongTensor([4])
def test_iterable_pointer(workers):
alice = workers["alice"]
t = torch.Tensor([[1, 2], [4, 5], [7, 8]])
p = t.send(alice)
assert len(alice.object_store) == 1
for idx, tensor in enumerate(p):
assert len(alice.object_store) == 2
assert isinstance(tensor, PointerTensor)
assert torch.all(tensor.get() == t[idx])
assert len(alice.object_store) == 1
l = []
for idx, tensor in enumerate(p):
l.append(tensor)
assert len(alice.object_store) == 4
del l
del tensor
assert len(alice.object_store) == 1
for idx, tensor in enumerate(p[:, 1]):
assert len(alice.object_store) == 3
assert isinstance(tensor, PointerTensor)
assert torch.all(tensor.get() == t[:, 1][idx])
def test_register_hook_on_remote_tensor_or_modules(workers):
alice = workers["alice"]
with syft.local_worker.registration_enabled():
g = []
def hook_function(inputs, outputs):
flag.append(True)
p = th.tensor([1.0, 2], requires_grad=True).send(alice)
p.register_hook(hook_function)
assert len(flag) == 0
p.sum().backward()
assert len(flag) == 1
g = []
def hook_function(model, inputs, outputs):
flag.append(True)
x = th.tensor([1.0, 2])
model = torch.nn.Linear(2, 1)
model.register_backward_hook(hook_function)
loss = model(x)
assert len(flag) == 0
loss.backward()
assert len(flag) == 1
| true | true |
f73133716abf93447f9a681574d785a552cadd2f | 877 | py | Python | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | 6 | 2015-08-06T00:54:48.000Z | 2022-02-03T13:55:33.000Z | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | null | null | null | setup.py | jomido/jogger | d105a5d701c7958bb5ad072af4c23477e82cd363 | [
"MIT"
] | 1 | 2015-05-19T11:45:34.000Z | 2015-05-19T11:45:34.000Z | from setuptools import setup, find_packages
setup(
name='jogger',
version='0.1.1',
description='Navigate log files.',
long_description=(
open('README.md').read()
),
url='http://github.com/jomido/jogger/',
license='MIT',
author='Jonathan Dobson',
author_email='jon.m.dobson@gmail.com',
packages=[
'jogger'
],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Text Processing',
'Topic :: System :: Logging'
],
) | 29.233333 | 54 | 0.586089 | from setuptools import setup, find_packages
setup(
name='jogger',
version='0.1.1',
description='Navigate log files.',
long_description=(
open('README.md').read()
),
url='http://github.com/jomido/jogger/',
license='MIT',
author='Jonathan Dobson',
author_email='jon.m.dobson@gmail.com',
packages=[
'jogger'
],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Text Processing',
'Topic :: System :: Logging'
],
) | true | true |
f73134a89fd7d8c446480e1eb914cc01383bee8e | 4,859 | py | Python | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2018_02_14/operations/_private_link_resources_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2018_02_14/operations/_private_link_resources_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2018_02_14/operations/_private_link_resources_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations(object):
    """PrivateLinkResourcesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.keyvault.v2018_02_14.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        # Autogenerated plumbing: keep references to the shared pipeline
        # client, serializers and client configuration.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_vault(
        self,
        resource_group_name,  # type: str
        vault_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.PrivateLinkResourceListResult"
        """Gets the private link resources supported for the key vault.

        :param resource_group_name: Name of the resource group that contains the key vault.
        :type resource_group_name: str
        :param vault_name: The name of the key vault.
        :type vault_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResourceListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.keyvault.v2018_02_14.models.PrivateLinkResourceListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.PrivateLinkResourceListResult"]
        # Map auth/not-found/conflict status codes to typed exceptions;
        # callers may extend the mapping via kwargs['error_map'].
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-02-14"
        accept = "application/json"

        # Construct URL
        url = self.list_by_vault.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            # The pattern enforces Key Vault naming rules (3-24 alphanumeric/hyphen chars).
            'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)

        # `cls` lets callers post-process the raw pipeline response.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list_by_vault.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateLinkResources'}  # type: ignore
| 46.27619 | 191 | 0.686149 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_vault(
self,
resource_group_name,
vault_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-02-14"
accept = "application/json"
url = self.list_by_vault.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_vault.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/privateLinkResources'}
| true | true |
f73134cfd4c3af902e244125da2780eb7d8fee85 | 16,270 | py | Python | libgoods/libgoods/noaa_coops.py | NOAA-ORR-ERD/GnomeTools | a9ff592bd9c7ed098f6081367aa35eae525c9774 | [
"Unlicense"
] | 2 | 2017-02-15T20:45:42.000Z | 2020-10-09T16:00:00.000Z | libgoods/libgoods/noaa_coops.py | NOAA-ORR-ERD/GnomeTools | a9ff592bd9c7ed098f6081367aa35eae525c9774 | [
"Unlicense"
] | 10 | 2015-06-25T23:42:11.000Z | 2021-06-22T16:19:19.000Z | libgoods/libgoods/noaa_coops.py | NOAA-ORR-ERD/GnomeTools | a9ff592bd9c7ed098f6081367aa35eae525c9774 | [
"Unlicense"
] | 15 | 2016-01-11T20:49:10.000Z | 2020-10-15T18:02:20.000Z | #!/usr/bin/env python
from __future__ import print_function
import datetime
try:
from urllib.request import urlopen, Request #py3
except ImportError:
from urllib2 import urlopen, Request #py2
import requests
from netCDF4 import Dataset
import os, glob
'''
Methods for generating ordered filelist for a time series of CO-OPS data
(Nowcasts + Forecasts) based on user specified start and end dates. If end
date is unspecified or greater than datetime.utcnow() the latest forecast
will be automatically be appended.
Notes on COOPS naming and aggregations:
Nowcast and forecast files are created four times a day. Output is hourly in
individual files. So each update generates 6 nowcast files and 48 forecast files
The update cycle time will be the last model output timestep in the nowcast
files and the first timestep in the forecast files
Example filenames from one update cycle (20141027.t15z):
Nowcast:
nos.ngofs.fields.n000.20141027.t15z.nc
nos.ngofs.fields.n001.20141027.t15z.nc
...
nos.ngofs.fields.n006.20141027.t15z.nc
Forecast:
nos.ngofs.fields.f000.20141027.t15z.nc
nos.ngofs.fields.f002.20141027.t15z.nc
...
nos.ngofs.fields.f048.20141027.t15z.nc
So to make a time series, use subsequent nowcasts updates strung together sequentially
by update date/time then by n000-n005 (leave off the last one as it overlaps with
the next set of files)
Similarly append the forecast that is the same update cycle as the most recent nowcast
Confusing? Yes. Run the code and look at the output, the hard work is already done :)
!!!!!Important note: this is for newer ROMS and FVCOM models only. The POM models
still have old file structure with more than one time step per file
'''
# def specify_bnd_types(grid,segs,ss_land_nodes=[]):
# '''
# The node values were determined by plotting grid, they
# are not included in the model output
# Land_bnd_segs are needed to get the boundary right for subset grids only
# They are obtained by tri_grid remap_bry_nodes method
# '''
# if grid.lower() == 'ngofs':
# ow = list(range(1,180))
# elif grid.lower() == 'nwgofs':
# ow = list(range(1,207))
# elif grid.lower() == 'negofs':
# ow = list(range(1,139))
# elif grid.lower() == 'creofs':
# ow = [68408,68409,68410,68411,68412,68414,68604,68605,68606,68607,68608,68791,68792,68793,68962,68963,68964,68965,69130,69131,69132,69133,69303,69304,69305,69479,69481,69669,69670,69671,69672,69674,69675,69866,69867,69868,69869,69870,70062,70063,70064,70065,70271,70272,70489,70490,70704,70705,70927,70928,71144,71346,71520,71683,71844,72001,72154,72281,72377,72462,72532,72583,72631,72676,72720,72765,72810,72851,72897,72939,72981,73023,73061,73099,73138,73178,73215,73251,73283,73313,73346,73381,73417,73453,73454,73481,73502,73523]
# elif grid.lower() == 'sfbofs':
# ow = [1,2,3,4,5,97,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,144,51,52,53,54,55,150,56,57,58,59,60,61,62,63,64,65,66,162,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91]
# elif grid.lower() == 'gom3':
# ow = list(range(1,120))
# else:
# ow = [1,10000]
# seg_types= []
# if len(ss_land_nodes) > 0: #subset
# for seg in segs:
# if seg[0] in ss_land_nodes and seg[1] in ss_land_nodes:
# seg_types.append(0)
# else:
# seg_types.append(1)
# else:
# for seg in segs:
# if seg[0] in ow and seg[1] in ow:
# seg_types.append(1)
# else:
# seg_types.append(0)
# return seg_types
def make_server_filelist(model,hour0,start,end=None,test_exist=False):
    '''
    Build an ordered list of OPeNDAP urls covering an aggregated time series
    of 6-hourly nowcast cycles, optionally capped with the latest forecast.

    Args:
        model (string): The COOPS OFS (e.g. NGOFS)
        hour0 (int): First update hour of the day for this model
            (typically 3 for triangular-grid models, 0 for ROMS models)
        start (datetime.date): First date to look for model output
        end (datetime.date): Stop before this date; if None or in the
            future, the most recent forecast cycle is appended instead
            (the forecast is not truncated, so it may run past end)
        test_exist (bool): When True, drop urls that the COOPS server
            does not actually serve

    Returns:
        urls (list): Ordered list of file urls
    '''
    base = ('https://opendap.co-ops.nos.noaa.gov/thredds/dodsC/NOAA/'
            + model.upper() + '/MODELS/')
    cursor = datetime.datetime.combine(start, datetime.time(hour0, 0))
    eight_hours = datetime.timedelta(hours=8)
    # An "open" end (None or effectively now) means we also want the forecast.
    if end is None or end > datetime.datetime.utcnow().date() - eight_hours:
        stop = datetime.datetime.utcnow() - eight_hours
        want_forecast = True
    else:
        stop = datetime.datetime.combine(end, datetime.time(hour0, 0))
        want_forecast = False
    urls = []
    while cursor <= stop:
        month_tag = '{0}{1:02d}'.format(cursor.year, cursor.month)
        day_tag = '{0}{1:02d}'.format(month_tag, cursor.day)
        first_file = '{0}{1}/nos.{2}.fields.n000.{3}.t{4:02d}z.nc'.format(
            base, month_tag, model.lower(), day_tag, cursor.hour)
        urls.extend(make_agg(first_file, type='nc'))
        cursor = cursor + datetime.timedelta(hours=6)  # nowcast cycles are 6-hourly
    # Optionally weed out urls the server does not know about (404 check).
    if test_exist:
        urls = [u for u in urls if test_server_existence(u + '.html')]
    if want_forecast:
        # Forecast cycle shares the date/hour stamp of the newest nowcast.
        stamp = urls[-1].split('/')[-1].split('n005.')[-1]
        forecast_first = '{0}{1}/nos.{2}.fields.f000.{3}'.format(
            base, month_tag, model.lower(), stamp)
        urls.extend(make_agg(forecast_first))
    return urls
def sort_local_files(local_dir,model):
    '''
    Create a filelist for an aggregated time series from files already
    downloaded into a local directory.

    Args:
        local_dir (str): Directory containing downloaded cycle files
        model (str): OFS identifier (kept for interface compatibility;
            currently unused -- the glob pattern alone selects files)

    Returns:
        (flist, completeness): flist is the ordered list of absolute
        filepaths; completeness is an int 0-2 counting whether the last
        nowcast cycle and the forecast cycle were fully present.
    '''
    nc0_files = glob.glob(os.path.join(local_dir, '*n000*'))
    flist = []
    nc_complete = False
    # NOTE: was `for f in nc0_files: ... for f in agg:` -- the inner loop
    # shadowed the outer variable; use distinct names.
    for nc0 in nc0_files:
        agg = make_agg(nc0, 'nc')
        # Only keep a nowcast cycle when every hourly file is present.
        nc_complete = all(os.path.exists(p) for p in agg)
        if nc_complete:
            flist.extend(agg)
    fc_complete = False
    # Guard: with no complete nowcast cycle, flist[-1] would raise IndexError.
    if flist:
        fc0_file = flist[-1].replace('n005', 'f000')
        agg = make_agg(fc0_file, 'fc')
        # Only append the forecast when all 48 hourly files are present.
        fc_complete = all(os.path.exists(p) for p in agg)
        if fc_complete:
            flist.extend(agg)
    return flist, nc_complete + fc_complete
def make_agg(fc_file0,type='fc'):
    '''
    Expand the first file name of a model cycle into the full hourly list.

    The '000' hour tag in fc_file0 is replaced by successive 3-digit hour
    tags. For 'fc' the result is f000..f048 (49 files). For 'nc' it is
    n000..n005 (6 files) -- the final nowcast hour is deliberately left off
    because it overlaps the first hour of the next cycle. For 'spec' only
    hours 003 and 006 are returned (fc_file0 itself is excluded).

    Args:
        fc_file0 (str): Path/url of the cycle's hour-000 file; must
            contain exactly one '000' substring.
        type (str): 'fc', 'nc' or 'spec'.

    Returns:
        agg (list): Ordered list of file names.

    Raises:
        ValueError: If type is not one of the recognized values.
            (Previously an invalid type only printed a message and then
            crashed with NameError on the undefined hour count.)
    '''
    if type == 'fc':
        hours = range(1, 49)
    elif type == 'nc':
        # Leave off the last nowcast file: one-hour overlap with next cycle.
        hours = range(1, 6)
    elif type == 'spec':
        hours = [3, 6]
    else:
        raise ValueError("type must be 'fc', 'nc' or 'spec', got {0!r}".format(type))
    a, b = fc_file0.split('000')
    # 'spec' intentionally omits the hour-000 file itself.
    agg = [] if type == 'spec' else [fc_file0]
    for h in hours:
        agg.append(a + str(h).zfill(3) + b)
    return agg
def test_existence(url):
req = Request(url)
try:
urlopen(req)
exists = True
except:
print('Not found: ', url)
exists = False
return exists
def test_server_existence(url):
    '''
    Return True if url answers with HTTP 200, False otherwise.

    Adds a timeout so a hung server cannot stall the whole filelist
    build, and treats connection-level failures (DNS, refused, timeout)
    as "not found" instead of raising.
    '''
    try:
        resp = requests.get(url, timeout=30)
    except requests.RequestException:
        print('Not found: ', url)
        return False
    if resp.status_code == 200:
        exists = True
    else:
        print('Not found: ', url)
        exists = False
    return exists
def download_and_save(url,output_dir):
    '''
    Download the time/u/v variables from a remote netCDF file and write
    them, with their dimensions and attributes, to a local copy named
    after the last path component of url.

    try/finally added so both datasets are closed even if the copy fails
    partway (previously a failure leaked both file handles).
    '''
    nc_in = Dataset(url)
    try:
        fname = url.split('/')[-1]
        nc_out = Dataset(os.path.join(output_dir, fname), 'w')
        try:
            # Copy dimensions first; variables below reference them.
            for dname, the_dim in nc_in.dimensions.items():
                nc_out.createDimension(dname, len(the_dim))
            for var in ['time', 'u', 'v']:
                varin = nc_in.variables[var]
                varout = nc_out.createVariable(var, varin.datatype, varin.dimensions)
                varout[:] = varin[:]
                # Carry over variable attributes (units, fill value, etc.).
                for name in varin.ncattrs():
                    setattr(varout, name, getattr(varin, name))
        finally:
            nc_out.close()
    finally:
        nc_in.close()
def ofs_info(ofs):
    """Return an HTML description blurb for a NOAA Operational Forecast System.

    Args:
        ofs (str): OFS identifier, case-insensitive (e.g. 'NGOFS', 'CBOFS').

    Returns:
        str: HTML snippet describing the model, or '' for unknown systems.
    """
    ofs = ofs.upper()
    if ofs == 'CREOFS':
        info = '''
        The <a href="http://tidesandcurrents.noaa.gov/ofs/creofs/creofs.html" target="_blank">
        Columbia River Estuary Operational Forecast System (CREOFS)</a> was
        jointly developed by the <a href="http://www.ohsu.edu/xd/" target="_blank">
        Oregon Health & Science University (OHSU)</a>,
        the <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a> and
        <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        and the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>.
        The CREOFS model domain includes the upper and lower Columbia River and Estuary.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/creofs/creofs_info.html" target="_blank">
        model information page</a>.
        '''
    # The three northern Gulf FVCOM systems share one description.
    elif any(x in ofs for x in ['NGOFS','NEGOFS','NWGOFS']):
        info = '''
        A <a href="http://tidesandcurrents.noaa.gov/ofs/ngofs/ngofs.html" target="_blank">
        Northern Gulf of Mexico Operational Forecast System (NGOFS)</a>
        including two nested Northeast and Northwest Gulf of Mexico Operational
        Forecast Systems (NEGOFS/NWGOFS)
        has been developed to serve the maritime user community.
        NGOFS was developed in a joint project of the
        <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a>,
        the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>, and the
        <a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM). NGOFS generates water level, current, temperature and salinity
        nowcast and forecast guidance four times per day.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/ngofs/ngofs_info.html" target="_blank">
        model information page.</a>
        '''
    # DBOFS and TBOFS reuse the CBOFS text with names substituted below.
    elif any(x in ofs for x in ['DBOFS','TBOFS','CBOFS']):
        info = '''
        The <a href="http://tidesandcurrents.noaa.gov/ofs/cbofs/cbofs.html" target="_blank">
        Chesapeake Bay Operational Forecast System (CBOFS)</a> was developed by
        the <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service/Office of Coast Survey</a> in a joint project
        with the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS/Center for Operational Oceanographic Products and Services
        (CO-OPS)</a> and the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service/National Centers for
        Environmental Prediction (NCEP) Central Operations (NCO)</a> using
        <a href="http://www.myroms.org/" target="_blank">Rutgers
        University's Regional Ocean Modeling System (ROMS)</a>.
        CBOFS generates water level, current, temperature and salinity nowcast
        and forecast guidance four times per day.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/cbofs/cbofs_info.html" target="_blank">
        model information page.</a>
        '''
        if ofs == 'DBOFS':
            info = info.replace('cbofs','dbofs')
            info = info.replace('CBOFS','DBOFS')
            info = info.replace('Chesapeake','Delaware')
        elif ofs == 'TBOFS':
            info = info.replace('cbofs','tbofs')
            info = info.replace('CBOFS','TBOFS')
            info = info.replace('Chesapeake','Tampa')
    elif ofs == 'SFBOFS':
        info = '''
        A <a href="http://tidesandcurrents.noaa.gov/ofs/sfbofs/sfbofs.html" target="_blank">
        San Francisco Bay Operational Forecast System (SFBOFS)</a>
        has been developed to serve the San Francisco Bay maritime communities.
        SFBOFS was jointly developed by <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        NOAA/National Ocean Service's (NOS) Office of Coast Survey </a>,
        the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>, and the
        <a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM).The NWS and NOS work together to run SFBOFS operationally.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/sfbofs/sfbofs_info.html" target="_blank">
        model information page.</a>
        '''
    elif ofs == 'LEOFS':
        info = '''
        The upgraded <a href="http://tidesandcurrents.noaa.gov/ofs/leofs/leofs.html" target="_blank">
        Lake Erie Operational Forecast System (LEOFS)</a> was jointly developed by the
        <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
        NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>
        and <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
        Office of Coast Survey</a>, <a href="http://www.glerl.noaa.gov/" target="_blank">
        the Great Lakes Environmental Research Laboratory (GLERL)</a>,
        the <a href="http://mag.ncep.noaa.gov" target="_blank">
        NOAA/National Weather Service's (NWS) National Centers
        for Environmental Prediction (NCEP) Central Operations (NCO)</a>,
        and the<a href="http://fvcom.smast.umassd.edu/" target="_blank">
        University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
        Model (FVCOM). The NWS and NOS work together to run LEOFS operationally.
        For detailed model information, visit the NOAA CO-OPS
        <a href="http://tidesandcurrents.noaa.gov/ofs/leofs/leofs_info.html" target="_blank">
        model information page.</a>
        '''
    else:
        # Unknown system: return an empty blurb rather than raising.
        return ''
    return info
#if __name__ == "__main__":
# ofs = 'ngofs'
# hour0 = 3
# #sdate = datetime.date.today()-datetime.timedelta(days=14)
# sdate = datetime.date(2014,10,28)
# flist = make_server_filelist(ofs,3,sdate)
# output_dir = 'C:\\Users\\amy.macfadyen\\Documents\\Projects\\goods\\trunk\\static\\ocean_models\\COOPS\\NGOFS'
# for f in flist:
# nc = Dataset(f)
# t = nc.variables['time']
# ts = num2date(t[:],t.units)
# print ts, '...writing'
# download_and_save(f,output_dir)
| 42.041344 | 544 | 0.631776 |
from __future__ import print_function
import datetime
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
import requests
from netCDF4 import Dataset
import os, glob
# The node values were determined by plotting grid, they
# are not included in the model output
# Land_bnd_segs are needed to get the boundary right for subset grids only
# They are obtained by tri_grid remap_bry_nodes method
# '''
def make_server_filelist(model,hour0,start,end=None,test_exist=False):
flist = []
stem = 'https://opendap.co-ops.nos.noaa.gov/thredds/dodsC/NOAA/' + model.upper() + '/MODELS/'
sdate = datetime.datetime.combine(start,datetime.time(hour0,0))
if end is None or end > datetime.datetime.utcnow().date() - datetime.timedelta(hours=8):
edate = datetime.datetime.utcnow() - datetime.timedelta(hours=8)
append_fc = 1
else:
edate = datetime.datetime.combine(end,datetime.time(hour0,0))
append_fc = 0
while sdate <= edate:
ym = str(sdate.year) + str(sdate.month).zfill(2)
ymd = ym + str(sdate.day).zfill(2)
h = str(sdate.hour).zfill(2)
fname = stem + ym + '/nos.' + model.lower() + '.fields.n000.' + ymd + '.t' + h + 'z.nc'
agg = make_agg(fname,type='nc')
flist.extend(agg)
sdate = sdate + datetime.timedelta(days=.25)
if test_exist:
flist = [f for f in flist if test_server_existence(f + '.html')]
if append_fc:
last_nc = flist[-1].split('/')[-1].split('n005.')[-1]
fc_file0 = stem + ym + '/nos.' + model.lower() + '.fields.f000.' + last_nc
fc_flist = make_agg(fc_file0)
flist.extend(fc_flist)
return flist
def sort_local_files(local_dir,model):
nc0_files = glob.glob(os.path.join(local_dir,'*n000*'))
flist = []
for f in nc0_files:
nc_complete = True
agg = make_agg(f,'nc')
for f in agg:
if not os.path.exists(f):
nc_complete = False
if nc_complete:
flist.extend(agg)
fc0_file = flist[-1].replace('n005','f000')
fc_complete = True
agg = make_agg(fc0_file,'fc')
for f in agg:
if not os.path.exists(f):
fc_complete = False
if fc_complete:
flist.extend(agg)
return flist, nc_complete + fc_complete
def make_agg(fc_file0,type='fc'):
if type == 'fc':
num_files = 48
elif type == 'nc':
num_files = 5
elif type == 'spec':
num_files = [3,6]
else:
print('Type must be fc or nc')
a,b = fc_file0.split('000')
if not type == 'spec':
agg = [fc_file0,]
for h in range(1,num_files+1):
agg.append(a + str(h).zfill(3) + b)
else:
agg = []
for h in num_files:
agg.append(a + str(h).zfill(3) + b)
return agg
def test_existence(url):
req = Request(url)
try:
urlopen(req)
exists = True
except:
print('Not found: ', url)
exists = False
return exists
def test_server_existence(url):
resp = requests.get(url)
if resp.status_code == 200:
exists = True
else:
print('Not found: ', url)
exists = False
return exists
def download_and_save(url,output_dir):
nc_in = Dataset(url)
fname = url.split('/')[-1]
nc_out = Dataset(os.path.join(output_dir,fname),'w')
for dname, the_dim in nc_in.dimensions.items():
nc_out.createDimension(dname, len(the_dim))
for var in ['time','u','v']:
varin = nc_in.variables[var]
varout = nc_out.createVariable(var, varin.datatype, varin.dimensions)
varout[:] = varin[:]
for name in varin.ncattrs():
value = getattr(varin,name)
setattr(varout, name, value)
nc_in.close()
nc_out.close()
def ofs_info(ofs):
ofs = ofs.upper()
if ofs == 'CREOFS':
info = '''
The <a href="http://tidesandcurrents.noaa.gov/ofs/creofs/creofs.html" target="_blank">
Columbia River Estuary Operational Forecast System (CREOFS)</a> was
jointly developed by the <a href="http://www.ohsu.edu/xd/" target="_blank">
Oregon Health & Science University (OHSU)</a>,
the <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
NOAA/National Ocean Service's (NOS) Office of Coast Survey </a> and
<a href="http://tidesandcurrents.noaa.gov/" target="_blank">
Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
and the <a href="http://mag.ncep.noaa.gov" target="_blank">
NOAA/National Weather Service's (NWS) National Centers
for Environmental Prediction (NCEP) Central Operations (NCO)</a>.
The CREOFS model domain includes the upper and lower Columbia River and Estuary.
For detailed model information, visit the NOAA CO-OPS
<a href="http://tidesandcurrents.noaa.gov/ofs/creofs/creofs_info.html" target="_blank">
model information page</a>.
'''
elif any(x in ofs for x in ['NGOFS','NEGOFS','NWGOFS']):
info = '''
A <a href="http://tidesandcurrents.noaa.gov/ofs/ngofs/ngofs.html" target="_blank">
Northern Gulf of Mexico Operational Forecast System (NGOFS)</a>
including two nested Northeast and Northwest Gulf of Mexico Operational
Forecast Systems (NEGOFS/NWGOFS)
has been developed to serve the maritime user community.
NGOFS was developed in a joint project of the
<a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
NOAA/National Ocean Service's (NOS) Office of Coast Survey </a>,
the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
the <a href="http://mag.ncep.noaa.gov" target="_blank">
NOAA/National Weather Service's (NWS) National Centers
for Environmental Prediction (NCEP) Central Operations (NCO)</a>, and the
<a href="http://fvcom.smast.umassd.edu/" target="_blank">
University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
Model (FVCOM). NGOFS generates water level, current, temperature and salinity
nowcast and forecast guidance four times per day.
For detailed model information, visit the NOAA CO-OPS
<a href="http://tidesandcurrents.noaa.gov/ofs/ngofs/ngofs_info.html" target="_blank">
model information page.</a>
'''
elif any(x in ofs for x in ['DBOFS','TBOFS','CBOFS']):
info = '''
The <a href="http://tidesandcurrents.noaa.gov/ofs/cbofs/cbofs.html" target="_blank">
Chesapeake Bay Operational Forecast System (CBOFS)</a> was developed by
the <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
NOAA/National Ocean Service/Office of Coast Survey</a> in a joint project
with the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
NOAA/NOS/Center for Operational Oceanographic Products and Services
(CO-OPS)</a> and the <a href="http://mag.ncep.noaa.gov" target="_blank">
NOAA/National Weather Service/National Centers for
Environmental Prediction (NCEP) Central Operations (NCO)</a> using
<a href="http://www.myroms.org/" target="_blank">Rutgers
University's Regional Ocean Modeling System (ROMS)</a>.
CBOFS generates water level, current, temperature and salinity nowcast
and forecast guidance four times per day.
For detailed model information, visit the NOAA CO-OPS
<a href="http://tidesandcurrents.noaa.gov/ofs/cbofs/cbofs_info.html" target="_blank">
model information page.</a>
'''
if ofs == 'DBOFS':
info = info.replace('cbofs','dbofs')
info = info.replace('CBOFS','DBOFS')
info = info.replace('Chesapeake','Delaware')
elif ofs == 'TBOFS':
info = info.replace('cbofs','tbofs')
info = info.replace('CBOFS','TBOFS')
info = info.replace('Chesapeake','Tampa')
elif ofs == 'SFBOFS':
info = '''
A <a href="http://tidesandcurrents.noaa.gov/ofs/sfbofs/sfbofs.html" target="_blank">
San Francisco Bay Operational Forecast System (SFBOFS)</a>
has been developed to serve the San Francisco Bay maritime communities.
SFBOFS was jointly developed by <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
NOAA/National Ocean Service's (NOS) Office of Coast Survey </a>,
the <a href="http://tidesandcurrents.noaa.gov/" target="_blank">
NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>,
the <a href="http://mag.ncep.noaa.gov" target="_blank">
NOAA/National Weather Service's (NWS) National Centers
for Environmental Prediction (NCEP) Central Operations (NCO)</a>, and the
<a href="http://fvcom.smast.umassd.edu/" target="_blank">
University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
Model (FVCOM).The NWS and NOS work together to run SFBOFS operationally.
For detailed model information, visit the NOAA CO-OPS
<a href="http://tidesandcurrents.noaa.gov/ofs/sfbofs/sfbofs_info.html" target="_blank">
model information page.</a>
'''
elif ofs == 'LEOFS':
info = '''
The upgraded <a href="http://tidesandcurrents.noaa.gov/ofs/leofs/leofs.html" target="_blank">
Lake Erie Operational Forecast System (LEOFS)</a> was jointly developed by the
<a href="http://tidesandcurrents.noaa.gov/" target="_blank">
NOAA/NOS Center for Operational Oceanographic Products and Services (CO-OPS)</a>
and <a href="http://www.nauticalcharts.noaa.gov/" target="_blank">
Office of Coast Survey</a>, <a href="http://www.glerl.noaa.gov/" target="_blank">
the Great Lakes Environmental Research Laboratory (GLERL)</a>,
the <a href="http://mag.ncep.noaa.gov" target="_blank">
NOAA/National Weather Service's (NWS) National Centers
for Environmental Prediction (NCEP) Central Operations (NCO)</a>,
and the<a href="http://fvcom.smast.umassd.edu/" target="_blank">
University of Massachusetts, Dartmouth </a> using the Finite Volume Coastal Ocean
Model (FVCOM). The NWS and NOS work together to run LEOFS operationally.
For detailed model information, visit the NOAA CO-OPS
<a href="http://tidesandcurrents.noaa.gov/ofs/leofs/leofs_info.html" target="_blank">
model information page.</a>
'''
else:
return ''
return info
| true | true |
f73135263aa5d42f8ba22ba5ad4466b2c6a05dc0 | 2,549 | py | Python | python2/koans/about_modules.py | rameshugar/koans | 35f2407dac045040bfd54ebe9f95ce77fd8a1b23 | [
"MIT"
] | null | null | null | python2/koans/about_modules.py | rameshugar/koans | 35f2407dac045040bfd54ebe9f95ce77fd8a1b23 | [
"MIT"
] | null | null | null | python2/koans/about_modules.py | rameshugar/koans | 35f2407dac045040bfd54ebe9f95ce77fd8a1b23 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This is very different to AboutModules in Ruby Koans
# Our AboutMultipleInheritance class is a little more comparable
#
from runner.koan import *
from another_local_module import *
from local_module_with_all_defined import *
class AboutModules(Koan):
def test_importing_other_python_scripts_as_modules(self):
import local_module # local_module.py
duck = local_module.Duck()
self.assertEqual("Daffy", duck.name)
def test_importing_attributes_from_classes_using_from_keyword(self):
from local_module import Duck
duck = Duck() # no module qualifier needed this time
self.assertEqual("Daffy", duck.name)
def test_we_can_import_multiple_items_at_once(self):
import jims, joes
jims_dog = jims.Dog()
joes_dog = joes.Dog()
self.assertEqual("jims dog", jims_dog.identify())
self.assertEqual("joes dog", joes_dog.identify())
def test_importing_all_module_attributes_at_once(self):
"""
importing all attributes at once is done like so:
from another_local_module import *
The import wildcard cannot be used from within classes or functions.
"""
goose = Goose()
hamster = Hamster()
self.assertEqual("Mr Stabby", goose.name)
self.assertEqual("Phil", hamster.name)
def test_modules_hide_attributes_prefixed_by_underscores(self):
try:
private_squirrel = _SecretSquirrel()
except NameError as ex:
self.assertMatch("global name '_SecretSquirrel' is not defined", ex[0])
def test_private_attributes_are_still_accessible_in_modules(self):
from local_module import Duck # local_module.py
duck = Duck()
self.assertEqual("password", duck._password)
# module level attribute hiding doesn't affect class attributes
# (unless the class itself is hidden).
def test_a_modules_XallX_statement_limits_what_wildcards_will_match(self):
"""Examine results of from local_module_with_all_defined import *"""
# 'Goat' is on the __all__ list
goat = Goat()
self.assertEqual("George", goat.name)
# How about velociraptors?
lizard = _Velociraptor()
self.assertEqual("Cuddles", lizard.name)
# SecretDuck? Never heard of her!
try:
duck = SecretDuck()
except NameError as ex:
self.assertMatch("global name 'SecretDuck' is not defined", ex[0])
| 32.265823 | 83 | 0.673597 |
from runner.koan import *
from another_local_module import *
from local_module_with_all_defined import *
class AboutModules(Koan):
def test_importing_other_python_scripts_as_modules(self):
import local_module
duck = local_module.Duck()
self.assertEqual("Daffy", duck.name)
def test_importing_attributes_from_classes_using_from_keyword(self):
from local_module import Duck
duck = Duck()
self.assertEqual("Daffy", duck.name)
def test_we_can_import_multiple_items_at_once(self):
import jims, joes
jims_dog = jims.Dog()
joes_dog = joes.Dog()
self.assertEqual("jims dog", jims_dog.identify())
self.assertEqual("joes dog", joes_dog.identify())
def test_importing_all_module_attributes_at_once(self):
goose = Goose()
hamster = Hamster()
self.assertEqual("Mr Stabby", goose.name)
self.assertEqual("Phil", hamster.name)
def test_modules_hide_attributes_prefixed_by_underscores(self):
try:
private_squirrel = _SecretSquirrel()
except NameError as ex:
self.assertMatch("global name '_SecretSquirrel' is not defined", ex[0])
def test_private_attributes_are_still_accessible_in_modules(self):
from local_module import Duck
duck = Duck()
self.assertEqual("password", duck._password)
# (unless the class itself is hidden).
def test_a_modules_XallX_statement_limits_what_wildcards_will_match(self):
# 'Goat' is on the __all__ list
goat = Goat()
self.assertEqual("George", goat.name)
# How about velociraptors?
lizard = _Velociraptor()
self.assertEqual("Cuddles", lizard.name)
# SecretDuck? Never heard of her!
try:
duck = SecretDuck()
except NameError as ex:
self.assertMatch("global name 'SecretDuck' is not defined", ex[0])
| true | true |
f7313861d3e6c67d85bfeb4b43faf3df2789455b | 8,380 | py | Python | data/torch_151_data/sampler.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | 1 | 2018-12-09T06:09:29.000Z | 2018-12-09T06:09:29.000Z | data/torch_151_data/sampler.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | data/torch_151_data/sampler.py | jihuacao/Putil | b753fc94bea4cbda00f483681c55f0e9f54adef2 | [
"Apache-2.0"
] | null | null | null | import torch
from torch._six import int_classes as _int_classes
class Sampler(object):
    r"""Abstract parent of every index sampler.

    A concrete subclass must implement :meth:`__iter__`, yielding indices
    into the dataset, and conventionally also :meth:`__len__` returning
    how many indices one pass produces.

    .. note:: :class:`~torch.utils.data.DataLoader` does not strictly
              require :meth:`__len__`, but expects it whenever the
              loader's own length is computed.
    """

    def __init__(self, data_source):
        # The base class keeps no state; subclasses decide what to store.
        pass

    def __iter__(self):
        # Iterating the abstract base is always an error.
        raise NotImplementedError
# NOTE [ Lack of Default `__len__` in Python Abstract Base Classes ]
#
# Many times we have an abstract class representing a collection/iterable of
# data, e.g., `torch.utils.data.Sampler`, with its subclasses optionally
# implementing a `__len__` method. In such cases, we must make sure to not
# provide a default implementation, because both straightforward default
# implementations have their issues:
#
# + `return NotImplemented`:
# Calling `len(subclass_instance)` raises:
# TypeError: 'NotImplementedType' object cannot be interpreted as an integer
#
# + `raise NotImplementedError()`:
# This prevents triggering some fallback behavior. E.g., the built-in
# `list(X)` tries to call `len(X)` first, and executes a different code
# path if the method is not found or `NotImplemented` is returned, while
# raising an `NotImplementedError` will propagate and and make the call
# fail where it could have use `__iter__` to complete the call.
#
# Thus, the only two sensible things to do are
#
# + **not** provide a default `__len__`.
#
# + raise a `TypeError` instead, which is what Python uses when users call
# a method that is not defined on an object.
# (@ssnl verifies that this works on at least Python 3.7.)
class SequentialSampler(Sampler):
    r"""Yields indices ``0, 1, ..., len(data_source) - 1`` in order,
    identically on every pass.

    Arguments:
        data_source (Dataset): dataset to sample from
    """

    def __init__(self, data_source):
        self.data_source = data_source

    def __iter__(self):
        count = len(self.data_source)
        return iter(range(count))

    def __len__(self):
        return len(self.data_source)
class RandomSampler(Sampler):
    r"""Draws dataset indices in random order.

    Without replacement the sampler yields a fresh permutation of all
    indices each pass; with replacement the caller may also choose how
    many draws to make via :attr:`num_samples`.

    Arguments:
        data_source (Dataset): dataset to sample from
        replacement (bool): samples are drawn with replacement if ``True``, default=``False``
        num_samples (int): number of samples to draw, default=`len(dataset)`. This argument
            is supposed to be specified only when `replacement` is ``True``.
    """

    def __init__(self, data_source, replacement=False, num_samples=None):
        self.data_source = data_source
        self.replacement = replacement
        self._num_samples = num_samples

        # Validate eagerly so misuse fails at construction, not iteration.
        if not isinstance(self.replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(self.replacement))
        if not replacement and self._num_samples is not None:
            raise ValueError("With replacement=False, num_samples should not be specified, "
                             "since a random permute will be performed.")
        if not isinstance(self.num_samples, int) or self.num_samples <= 0:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(self.num_samples))

    @property
    def num_samples(self):
        # Re-read the dataset length each time: it might change at runtime.
        if self._num_samples is not None:
            return self._num_samples
        return len(self.data_source)

    def __iter__(self):
        total = len(self.data_source)
        if self.replacement:
            draws = torch.randint(high=total, size=(self.num_samples,),
                                  dtype=torch.int64)
            return iter(draws.tolist())
        return iter(torch.randperm(total).tolist())

    def __len__(self):
        return self.num_samples
class SubsetRandomSampler(Sampler):
    r"""Permutes a fixed collection of indices on every pass, drawing each
    exactly once (no replacement).

    Arguments:
        indices (sequence): a sequence of indices
    """

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        # Fresh permutation of positions each pass; yield the stored index
        # found at each permuted position.
        for position in torch.randperm(len(self.indices)):
            yield self.indices[position]

    def __len__(self):
        return len(self.indices)
class WeightedRandomSampler(Sampler):
    r"""Samples elements from ``[0,..,len(weights)-1]`` with given probabilities (weights).

    Args:
        weights (sequence) : a sequence of weights, not necessary summing up to one
        num_samples (int): number of samples to draw
        replacement (bool): if ``True``, samples are drawn with replacement.
            If not, they are drawn without replacement, which means that when a
            sample index is drawn for a row, it cannot be drawn again for that row.

    Example:
        >>> list(WeightedRandomSampler([0.1, 0.9, 0.4, 0.7, 3.0, 0.6], 5, replacement=True))
        [4, 4, 1, 4, 5]
        >>> list(WeightedRandomSampler([0.9, 0.4, 0.05, 0.2, 0.3, 0.1], 5, replacement=False))
        [0, 1, 4, 3, 2]
    """

    def __init__(self, weights, num_samples, replacement=True):
        # Reject bools explicitly: bool is a subclass of int, so a plain
        # isinstance check alone would silently accept True/False.
        count_is_valid = (isinstance(num_samples, _int_classes)
                          and not isinstance(num_samples, bool)
                          and num_samples > 0)
        if not count_is_valid:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(num_samples))
        if not isinstance(replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(replacement))
        self.weights = torch.as_tensor(weights, dtype=torch.double)
        self.num_samples = num_samples
        self.replacement = replacement

    def __iter__(self):
        picks = torch.multinomial(self.weights, self.num_samples, self.replacement)
        return iter(picks.tolist())

    def __len__(self):
        return self.num_samples
class BatchSampler(Sampler):
    r"""Wraps another sampler to yield a mini-batch of indices.

    Args:
        sampler (Sampler): Base sampler.
        batch_size (int): Size of mini-batch.
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``

    Example:
        >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=False))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> list(BatchSampler(SequentialSampler(range(10)), batch_size=3, drop_last=True))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    """

    def __init__(self, sampler, batch_size, drop_last):
        if not isinstance(sampler, Sampler):
            raise ValueError("sampler should be an instance of "
                             "torch.utils.data.Sampler, but got sampler={}"
                             .format(sampler))
        # bool is an int subclass, so exclude it explicitly.
        size_is_valid = (isinstance(batch_size, _int_classes)
                         and not isinstance(batch_size, bool)
                         and batch_size > 0)
        if not size_is_valid:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self):
        pending = []
        for index in self.sampler:
            pending.append(index)
            if len(pending) == self.batch_size:
                yield pending
                pending = []
        # Emit the short trailing batch unless the caller asked to drop it.
        if pending and not self.drop_last:
            yield pending

    def __len__(self):
        full_batches, remainder = divmod(len(self.sampler), self.batch_size)
        if self.drop_last or remainder == 0:
            return full_batches
        return full_batches + 1
| 39.342723 | 100 | 0.631742 | import torch
from torch._six import int_classes as _int_classes
class Sampler(object):
    """Base class for all Samplers.

    Subclasses must provide an ``__iter__`` method yielding indices of
    dataset elements; ``__len__`` is optional.
    """
    def __init__(self, data_source):
        # The base class keeps no state; subclasses decide what to retain.
        pass
    def __iter__(self):
        raise NotImplementedError
class SequentialSampler(Sampler):
    """Samples elements sequentially, always in the same order.

    Arguments:
        data_source (Dataset): dataset to sample from
    """
    def __init__(self, data_source):
        self.data_source = data_source
    def __iter__(self):
        # Indices 0..len-1 in order.
        return iter(range(len(self.data_source)))
    def __len__(self):
        return len(self.data_source)
class RandomSampler(Sampler):
    """Samples elements randomly.

    Without replacement the sampler iterates a fresh permutation of the whole
    dataset; with replacement the caller may choose ``num_samples`` draws.

    Arguments:
        data_source (Dataset): dataset to sample from
        replacement (bool): samples are drawn with replacement if ``True``
        num_samples (int): number of draws; only valid with ``replacement=True``
    """
    def __init__(self, data_source, replacement=False, num_samples=None):
        self.data_source = data_source
        self.replacement = replacement
        self._num_samples = num_samples
        if not isinstance(self.replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(self.replacement))
        if self._num_samples is not None and not replacement:
            raise ValueError("With replacement=False, num_samples should not be specified, "
                             "since a random permute will be performed.")
        if not isinstance(self.num_samples, int) or self.num_samples <= 0:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(self.num_samples))
    @property
    def num_samples(self):
        # Derived lazily: the dataset size might change at runtime.
        if self._num_samples is None:
            return len(self.data_source)
        return self._num_samples
    def __iter__(self):
        n = len(self.data_source)
        if self.replacement:
            # With replacement: num_samples independent uniform draws.
            return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist())
        # Without replacement: one full shuffled pass over the dataset.
        return iter(torch.randperm(n).tolist())
    def __len__(self):
        return self.num_samples
class SubsetRandomSampler(Sampler):
    """Samples elements randomly from a given list of indices, without replacement.

    Arguments:
        indices (sequence): a sequence of indices
    """
    def __init__(self, indices):
        self.indices = indices
    def __iter__(self):
        # Permute positions, then map each back to the supplied index.
        return (self.indices[i] for i in torch.randperm(len(self.indices)))
    def __len__(self):
        return len(self.indices)
class WeightedRandomSampler(Sampler):
    """Samples indices ``[0, len(weights))`` with probability proportional to ``weights``.

    Args:
        weights (sequence): relative weights; need not sum to one
        num_samples (int): number of samples to draw
        replacement (bool): draw with replacement if ``True``
    """
    def __init__(self, weights, num_samples, replacement=True):
        # bool is an int subclass, hence the explicit bool exclusion.
        if not isinstance(num_samples, _int_classes) or isinstance(num_samples, bool) or \
                num_samples <= 0:
            raise ValueError("num_samples should be a positive integer "
                             "value, but got num_samples={}".format(num_samples))
        if not isinstance(replacement, bool):
            raise ValueError("replacement should be a boolean value, but got "
                             "replacement={}".format(replacement))
        self.weights = torch.as_tensor(weights, dtype=torch.double)
        self.num_samples = num_samples
        self.replacement = replacement
    def __iter__(self):
        # multinomial performs the weighted draw in one call.
        return iter(torch.multinomial(self.weights, self.num_samples, self.replacement).tolist())
    def __len__(self):
        return self.num_samples
class BatchSampler(Sampler):
    """Wraps another sampler to yield mini-batches (lists) of indices.

    Args:
        sampler (Sampler): base sampler providing individual indices
        batch_size (int): size of each mini-batch
        drop_last (bool): drop the final short batch if ``True``
    """
    def __init__(self, sampler, batch_size, drop_last):
        if not isinstance(sampler, Sampler):
            raise ValueError("sampler should be an instance of "
                             "torch.utils.data.Sampler, but got sampler={}"
                             .format(sampler))
        # bool is an int subclass, hence the explicit bool exclusion.
        if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
                batch_size <= 0:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last
    def __iter__(self):
        batch = []
        for idx in self.sampler:
            batch.append(idx)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        # Trailing short batch is kept unless drop_last was requested.
        if len(batch) > 0 and not self.drop_last:
            yield batch
    def __len__(self):
        if self.drop_last:
            return len(self.sampler) // self.batch_size
        else:
            # Ceiling division: a short trailing batch still counts.
            return (len(self.sampler) + self.batch_size - 1) // self.batch_size
| true | true |
f73138dea25be614ca8fc1cc6c899fbfc0321a36 | 6,537 | py | Python | car_client.py | wangbiao0327/car | 2632de357107beeb240b330f20ec5ac5fb568beb | [
"MIT"
] | 1 | 2018-12-18T10:58:34.000Z | 2018-12-18T10:58:34.000Z | car_client.py | wangbiao0327/car | 2632de357107beeb240b330f20ec5ac5fb568beb | [
"MIT"
] | null | null | null | car_client.py | wangbiao0327/car | 2632de357107beeb240b330f20ec5ac5fb568beb | [
"MIT"
] | null | null | null | """
此模块做停车管理系统的客户端
Author:Recall
Date: 2018-10-19
module: socket、multiprocessing、sys、os、time、signal
Email:
"""
from socket import *
from setting import *
from messageAff import user_message
from multiprocessing import Process
import sys,os,time,signal
class carClient(object):
    """TCP client for the parking-management system.

    Each public method encodes a space-separated text command, sends it to
    the server configured by ``ADDR`` and interprets the textual reply
    (protocol keywords such as ``ok`` / ``error``).
    """

    def __init__(self):
        self.sockfd = socket(AF_INET, SOCK_STREAM)
        self.sockfd.connect(ADDR)
        self.mes = user_message()
        # Make Ctrl-C tell the server we are leaving before the process dies.
        signal.signal(signal.SIGINT, self.dis_signal)

    def dis_signal(self, sig, frame):
        """Signal handler: notify the server of departure, exit on SIGINT.

        NOTE(review): only SIGINT is registered in __init__, so the SIGQUIT
        branch is unreachable unless a caller registers it as well.
        """
        if sig == signal.SIGINT:
            self.sockfd.send(b'quit')
            sys.exit("强制退出")
        elif sig == signal.SIGQUIT:
            self.sockfd.send(b'quit')

    def Get_email_verify_code(self, username, email):
        """Request an e-mail verification code for ``username``.

        The username/e-mail pair is checked by the server; on success a
        verification code is mailed to the address.

        Returns:
            the verification code, or ``[False, <reason>]`` on mismatch.
        """
        data = 'select_email %s %s' % (username, email)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == 'ok':
            auth_code = self.mes.my_email(email)
            return auth_code
        else:
            return [False, "你输入的邮箱与注册的邮箱不一致"]

    def Modify_password(self, username, password):
        """Change a user's password on the server.

        Returns:
            True on success, False otherwise.
        """
        data = "change_password %s %s" % (username, password)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        return False

    def Modify_username(self, olduesrname, newusername):
        """Rename a user account.

        Returns:
            True on success; ``[False, <reason>]`` when the new name is
            already taken or the server rejects the change.
        """
        data = 'change_username %s %s' % (olduesrname, newusername)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        elif aff == "nameisuser":
            return [False, "用户名早已被使用"]
        return [False, '修改用户名失败']

    def Personal_information_display(self, username):
        """Fetch the profile fields stored for ``username``.

        Returns:
            the list of profile fields, or ``[False, <reason>]`` when the
            user is unknown.
        """
        data = "select_user_message %s" % username
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        user_list = aff.split(" ")
        if user_list[0] == "ok":
            return user_list[1:]
        return [False, "未找到用户信息"]

    def Personal_information_edit(self, username, phone_number):
        """Update the user's phone number on the server.

        Returns:
            True on success, False otherwise.
        """
        data = "change_user_message %s %s" % (username, phone_number)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        # BUG FIX: the original returned True on both branches, so callers
        # could never detect a failed update.
        return False

    def Select_history_recording(self, username, aff=0):
        """Fetch up to five parking-history records for ``username``.

        Args:
            username: account to query.
            aff: page offset; e.g. with 15 records, ``aff=2`` returns
                records 11 through 15.

        Returns:
            ``[True, [field, ...], ...]`` with one sub-list per record, or
            ``[False]`` when there is no history.
        """
        data = "get_history_msg %s %d" % (username, aff)
        self.sockfd.send(data.encode())
        # Renamed from the original's reuse of the ``aff`` parameter name.
        reply = self.sockfd.recv(4096).decode()
        history_list = [True]
        if reply != "error":
            # Records are space-separated; fields inside a record use "##".
            for record_str in reply.split(" "):
                history_list.append(record_str.split("##"))
            return history_list
        return [False]

    def Login(self, username, password):
        """Log the user in.

        Returns:
            True on success; ``[False, <reason>]`` on bad credentials or
            when the account is already online.
        """
        message = 'login %s %s' % (username, password)
        self.sockfd.send(message.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        elif aff == "passerror":
            return [False, "用户名或密码错误"]
        elif aff == "online":
            return [False, "你已经在线,不能重复登录"]
        return [False, "用户名或密码错误"]

    def Register(self, username, password, phone_number, car_factory, car_model, car_color, car_plate, email):
        """Register a new account together with its car details.

        Returns:
            True on success, ``[False, <server reason>]`` otherwise (e.g.
            username or licence plate already registered).
        """
        fields = [username, password, phone_number, car_factory, car_model,
                  car_color, car_plate, email]
        data = " ".join(["regist"] + fields)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == 'ok':
            return True
        return [False, aff]

    def User_quit(self, username):
        """Mark the user as logged out on the server.

        Returns:
            True on success, False otherwise.
        """
        data = 'quit %s' % username
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        return False

    def Select_weath_message(self, city):
        """Fetch the weather report for ``city``.

        Returns:
            ``[True, {...}]`` with weather/wind fields — the temperature key
            is ``max_temper`` during daytime (06-18h) and ``min_temper`` at
            night — or ``[False]`` on failure.
        """
        data = "select_weath_message %s" % city
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff != "error":
            weath_list = aff.split(" ")
            now_hour = time.localtime().tm_hour
            temper_key = "max_temper" if 6 <= now_hour <= 18 else "min_temper"
            dic = {
                "weather": weath_list[0],
                "wind_direction": weath_list[1],
                "wind_power": weath_list[2],
                temper_key: weath_list[3]
            }
            return [True, dic]
        return [False]

    def send_email(self, my_email):
        """Send a notification e-mail via the shared messaging helper."""
        self.mes.my_email(my_email)
if __name__ == "__main__":
    # Manual smoke test: connect to the server configured by setting.ADDR.
    client = carClient()
| 28.176724 | 120 | 0.528224 |
from socket import *
from setting import *
from messageAff import user_message
from multiprocessing import Process
import sys,os,time,signal
class carClient(object):
    """TCP client for the parking-management system.

    Each public method encodes a space-separated text command, sends it to
    the server configured by ``ADDR`` and interprets the textual reply
    (protocol keywords such as ``ok`` / ``error``).
    """
    def __init__(self):
        self.sockfd = socket(AF_INET,SOCK_STREAM)
        self.sockfd.connect(ADDR)
        self.mes = user_message()
        # Make Ctrl-C tell the server we are leaving before the process dies.
        signal.signal(signal.SIGINT,self.dis_signal)
    def dis_signal(self,sig,frame):
        """Signal handler: notify the server of departure, exit on SIGINT.

        NOTE(review): only SIGINT is registered in __init__, so the SIGQUIT
        branch is unreachable unless a caller registers it as well.
        """
        if sig == signal.SIGINT:
            self.sockfd.send(b'quit')
            sys.exit("强制退出")
        elif sig == signal.SIGQUIT:
            self.sockfd.send(b'quit')
    def Get_email_verify_code(self,username,email):
        """Request an e-mail verification code; ``[False, <reason>]`` on mismatch."""
        data = 'select_email %s %s'% (username,email)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == 'ok':
            auth_code = self.mes.my_email(email)
            return auth_code
        else:
            return [False,"你输入的邮箱与注册的邮箱不一致"]
    def Modify_password(self, username, password):
        """Change the user's password; True on success, False otherwise."""
        data = "change_password %s %s" % (username,password)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        return False
    def Modify_username(self,olduesrname,newusername):
        """Rename the account; True, or ``[False, <reason>]`` on failure."""
        data = 'change_username %s %s' % (olduesrname,newusername)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        elif aff == "nameisuser":
            return [False,"用户名早已被使用"]
        return [False,'修改用户名失败']
    def Personal_information_display(self,username):
        """Fetch the profile fields; ``[False, <reason>]`` when unknown."""
        data = "select_user_message %s" % username
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        user_list = aff.split(" ")
        if user_list[0] == "ok":
            return user_list[1:]
        return [False,"未找到用户信息"]
    def Personal_information_edit(self,username,phone_number):
        """Update the user's phone number.

        NOTE(review): both branches return True, so a failed update is
        indistinguishable from a successful one — the fall-through should
        presumably return False like the sibling methods.
        """
        data = "change_user_message %s %s" % (username,phone_number)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        return True
    def Select_history_recording(self,username,aff=0):
        """Fetch up to five history records; ``aff`` is the page offset.

        Returns ``[True, [field, ...], ...]`` or ``[False]`` on no history.
        """
        data = "get_history_msg %s %d" % (username,aff)
        self.sockfd.send(data.encode())
        # NOTE(review): this rebinding shadows the ``aff`` parameter.
        aff = self.sockfd.recv(4096).decode()
        history_list =[True]
        if aff != "error":
            # Records are space-separated; fields inside a record use "##".
            history_str = aff.split(" ")
            for i in history_str:
                record = i.split("##")
                history_list.append(record)
            return history_list
        return [False]
    def Login(self,username,password):
        """Log in; True, or ``[False, <reason>]`` on error / already online."""
        message = 'login %s %s' % (username, password)
        self.sockfd.send(message.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        elif aff == "passerror":
            return [False,"用户名或密码错误"]
        elif aff == "online":
            return [False,"你已经在线,不能重复登录"]
        return [False, "用户名或密码错误"]
    def Register(self, username,password,phone_number,car_factory,car_model,car_color,car_plate,email):
        """Register an account with car details; True or ``[False, reason]``."""
        L = [username,password,phone_number,car_factory,car_model,car_color,car_plate,email]
        data_list = ["regist"] + L
        data = " ".join(data_list)
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == 'ok':
            return True
        return [False,aff]
    def User_quit(self,username):
        """Mark the user as logged out; True on success, False otherwise."""
        data = 'quit %s' % username
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff == "ok":
            return True
        return False
    def Select_weath_message(self,city):
        """Fetch the weather report for ``city``.

        Returns ``[True, {...}]`` — the temperature key is ``max_temper``
        during daytime (06-18h) and ``min_temper`` at night — or ``[False]``.
        """
        data = "select_weath_message %s" % city
        self.sockfd.send(data.encode())
        aff = self.sockfd.recv(1024).decode()
        if aff != "error":
            weath_list = aff.split(" ")
            now_hour = time.localtime().tm_hour
            if 6 <= now_hour <= 18:
                dic = {
                    "weather":weath_list[0],
                    "wind_direction":weath_list[1],
                    "wind_power":weath_list[2],
                    "max_temper":weath_list[3]
                }
            else:
                dic = {
                    "weather": weath_list[0],
                    "wind_direction": weath_list[1],
                    "wind_power": weath_list[2],
                    "min_temper": weath_list[3]
                }
            return [True,dic]
        return [False]
    def send_email(self, my_email):
        """Send a notification e-mail via the shared messaging helper."""
        self.mes.my_email(my_email)
if __name__ == "__main__":
    # Manual smoke test: connect to the server configured by setting.ADDR.
    client = carClient()
| true | true |
f73138feb9a7f803855bf63268835f5d0728508e | 4,019 | py | Python | ubicacion/migrations/0004_auto_20180426_1619.py | jlopez0591/SIGIA | e857e2273daa43ab64fa78df254275af2dbcc2a5 | [
"MIT"
] | null | null | null | ubicacion/migrations/0004_auto_20180426_1619.py | jlopez0591/SIGIA | e857e2273daa43ab64fa78df254275af2dbcc2a5 | [
"MIT"
] | 7 | 2020-02-12T00:42:15.000Z | 2022-03-11T23:23:48.000Z | ubicacion/migrations/0004_auto_20180426_1619.py | jlopez0591/SIGIA | e857e2273daa43ab64fa78df254275af2dbcc2a5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-04-26 21:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):

    dependencies = [
        ('ubicacion', '0003_auto_20180417_1603'),
    ]

    # Every model below receives the same pair of timestamp columns, so the
    # eighteen AddField operations are generated instead of spelled out.
    # Order matches the original migration: per model, fecha_actualizacion
    # first, then fecha_creacion.
    _MODELS = [
        'carrera', 'carrerainstancia',
        'departamento', 'departamentoinstancia',
        'escuela', 'escuelainstancia',
        'facultad', 'facultadinstancia',
        'sede',
    ]

    operations = []
    for _model in _MODELS:
        operations.append(migrations.AddField(
            model_name=_model,
            name='fecha_actualizacion',
            field=models.DateField(auto_now=True),
        ))
        operations.append(migrations.AddField(
            model_name=_model,
            name='fecha_creacion',
            field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ))
| 34.646552 | 89 | 0.591689 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):

    dependencies = [
        ('ubicacion', '0003_auto_20180417_1603'),
    ]

    # Each model gets the same timestamp pair; generate the operations in
    # the original order (per model: fecha_actualizacion, fecha_creacion).
    _timestamped_models = (
        'carrera', 'carrerainstancia',
        'departamento', 'departamentoinstancia',
        'escuela', 'escuelainstancia',
        'facultad', 'facultadinstancia',
        'sede',
    )

    operations = [
        op
        for _name in _timestamped_models
        for op in (
            migrations.AddField(
                model_name=_name,
                name='fecha_actualizacion',
                field=models.DateField(auto_now=True),
            ),
            migrations.AddField(
                model_name=_name,
                name='fecha_creacion',
                field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
                preserve_default=False,
            ),
        )
    ]
| true | true |
f73139736d7eeb275bd78656e3b4701fb97eabe7 | 4,427 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/operations/usage_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/operations/usage_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/operations/usage_operations.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsageOperations(object):
    """UsageOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client Api Version. Constant value: "2017-12-01".
    """

    # Expose the generated models module on the operations class so callers
    # can reach the response types without a separate import.
    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-12-01"
        self.config = config

    def list(
            self, location, custom_headers=None, raw=False, **operation_config):
        """Gets, for the specified location, the current compute resource usage
        information as well as the limits for compute resources under the
        subscription.
        :param location: The location for which resource usage is queried.
        :type location: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Usage
        :rtype:
         ~azure.mgmt.compute.v2017_12_01.models.UsagePaged[~azure.mgmt.compute.v2017_12_01.models.Usage]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Closure handed to UsagePaged: fetches one page per call, either the
        # first page (next_link is None) or the server-supplied continuation.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                # Continuation links already embed their query string.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    # Attached after the def so the closure above can read the URL template.
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages'}
| 40.990741 | 144 | 0.630901 |
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class UsageOperations(object):
    """Operations for querying compute resource usage (API 2017-12-01)."""

    # Expose the generated models module on the operations class so callers
    # can reach the response types without a separate import.
    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2017-12-01"
        self.config = config

    def list(
            self, location, custom_headers=None, raw=False, **operation_config):
        """Return current compute usage and limits for ``location``.

        :param location: The location for which resource usage is queried.
        :param dict custom_headers: headers added to every page request.
        :param bool raw: also return the raw response wrapper.
        :return: an iterator-like paged collection of Usage objects.
        :raises: CloudError on any non-200 response.
        """
        # Closure handed to UsagePaged: fetches one page per call, either the
        # first page (next_link is None) or the server-supplied continuation.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # Continuation links already embed their query string.
                url = next_link
                query_parameters = {}
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
            response = self._client.send(request, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        deserialized = models.UsagePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.UsagePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    # Attached after the def so the closure above can read the URL template.
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages'}
| true | true |
f7313beb6b36e0b6947bf44f357978b77188d33a | 4,841 | py | Python | radish/extensions/syslog_writer.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
] | null | null | null | radish/extensions/syslog_writer.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
] | null | null | null | radish/extensions/syslog_writer.py | tuxrosi/radish | b21fa751f8dfc4309451476151c810b44975babb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This module provides an extension to write all features, scenarios and steps to the syslog.
"""
from __future__ import unicode_literals
from radish.terrain import world
from radish.feature import Feature
from radish.hookregistry import before, after
from radish.extensionregistry import extension
@extension
class SyslogWriter(object):
    """
    Syslog Writer radish extension. This extension is only supported on
    systems where the Python standard library supports the system logger
    (syslog). For example, this extension works on UNIX and UNIX-like
    systems (Linux), but will not work on Windows.
    """
    OPTIONS = [
        ("--syslog", "log all of your features, scenarios, and steps to the syslog")
    ]
    LOAD_IF = staticmethod(lambda config: config.syslog)
    LOAD_PRIORITY = 40
    def __init__(self):
        # import syslog only if the extension got loaded
        # but not if the module got loaded.
        import syslog
        before.all(self.syslog_writer_before_all)
        before.each_feature(self.syslog_writer_before_each_feature)
        before.each_scenario(self.syslog_writer_before_each_scenario)
        before.each_step(self.syslog_writer_before_each_step)
        after.all(self.syslog_writer_after_all)
        after.each_feature(self.syslog_writer_after_each_feature)
        after.each_scenario(self.syslog_writer_after_each_scenario)
        after.each_step(self.syslog_writer_after_each_step)
    def get_scenario_feature(self, scenario):
        """
        Gets the scenarios feature

        If the direct parent is not a Feature the grandparent is returned
        instead (the scenario is nested one level deeper).
        """
        if not isinstance(scenario.parent, Feature):
            return scenario.parent.parent
        return scenario.parent
    def log(self, message):
        """
        Logs the given message to the syslog
        :param string message: the message to log
        """
        import syslog
        try:
            # NOTE(review): ``unicode`` only exists on Python 2; on Python 3
            # this raises NameError, which the broad except swallows and the
            # str is logged unchanged. Confirm the targeted interpreter.
            if isinstance(message, unicode):
                message = message.encode("utf8")
        except Exception: # pylint: disable=broad-except
            pass
        finally:
            syslog.syslog(syslog.LOG_INFO, message)
    def syslog_writer_before_all(
        self, features, marker
    ): # pylint: disable=unused-argument
        """
        Opens the syslog
        """
        import syslog
        # NOTE(review): openlog() expects str on Python 3; the bytes ident
        # b"radish" presumably targets Python 2 — confirm.
        syslog.openlog(b"radish")
        self.log("begin run {0}".format(marker))
    def syslog_writer_after_all(
        self, features, marker
    ): # pylint: disable=unused-argument
        """
        Closes the syslog
        """
        import syslog
        self.log("end run {0}".format(marker))
        syslog.closelog()
    def syslog_writer_before_each_feature(self, feature):
        """
        Writes the feature to the syslog
        """
        self.log(
            "begin feature {0}:{1} {2}".format(
                world.config.marker, feature.id, feature.sentence
            )
        )
    def syslog_writer_after_each_feature(self, feature):
        """
        Writes the feature to the syslog
        """
        self.log(
            "end feature {0}:{1} {2}".format(
                world.config.marker, feature.id, feature.sentence
            )
        )
    def syslog_writer_before_each_scenario(self, scenario):
        """
        Writes the scenario to the syslog
        """
        self.log(
            "begin scenario {0}:{1}.{2} {3}".format(
                world.config.marker,
                self.get_scenario_feature(scenario).id,
                scenario.id,
                scenario.sentence,
            )
        )
    def syslog_writer_after_each_scenario(self, scenario):
        """
        Writes the scenario to the syslog
        """
        self.log(
            "end scenario {0}:{1}.{2} {3}".format(
                world.config.marker,
                self.get_scenario_feature(scenario).id,
                scenario.id,
                scenario.sentence,
            )
        )
    def syslog_writer_before_each_step(self, step):
        """
        Writes the step to the syslog
        """
        self.log(
            "begin step {0}:{1}.{2}.{3} {4}".format(
                world.config.marker,
                self.get_scenario_feature(step.parent).id,
                step.parent.id,
                step.id,
                step.sentence,
            )
        )
    def syslog_writer_after_each_step(self, step):
        """
        Writes the step to the syslog
        """
        self.log(
            "{0} step {1}:{2}.{3}.{4} {5}".format(
                step.state,
                world.config.marker,
                self.get_scenario_feature(step.parent).id,
                step.parent.id,
                step.id,
                step.sentence,
            )
        )
| 29.339394 | 95 | 0.567445 |
from __future__ import unicode_literals
from radish.terrain import world
from radish.feature import Feature
from radish.hookregistry import before, after
from radish.extensionregistry import extension
@extension
class SyslogWriter(object):
OPTIONS = [
("--syslog", "log all of your features, scenarios, and steps to the syslog")
]
LOAD_IF = staticmethod(lambda config: config.syslog)
LOAD_PRIORITY = 40
def __init__(self):
import syslog
before.all(self.syslog_writer_before_all)
before.each_feature(self.syslog_writer_before_each_feature)
before.each_scenario(self.syslog_writer_before_each_scenario)
before.each_step(self.syslog_writer_before_each_step)
after.all(self.syslog_writer_after_all)
after.each_feature(self.syslog_writer_after_each_feature)
after.each_scenario(self.syslog_writer_after_each_scenario)
after.each_step(self.syslog_writer_after_each_step)
def get_scenario_feature(self, scenario):
if not isinstance(scenario.parent, Feature):
return scenario.parent.parent
return scenario.parent
def log(self, message):
import syslog
try:
if isinstance(message, unicode):
message = message.encode("utf8")
except Exception:
pass
finally:
syslog.syslog(syslog.LOG_INFO, message)
def syslog_writer_before_all(
self, features, marker
):
import syslog
syslog.openlog(b"radish")
self.log("begin run {0}".format(marker))
def syslog_writer_after_all(
self, features, marker
):
import syslog
self.log("end run {0}".format(marker))
syslog.closelog()
def syslog_writer_before_each_feature(self, feature):
self.log(
"begin feature {0}:{1} {2}".format(
world.config.marker, feature.id, feature.sentence
)
)
def syslog_writer_after_each_feature(self, feature):
self.log(
"end feature {0}:{1} {2}".format(
world.config.marker, feature.id, feature.sentence
)
)
def syslog_writer_before_each_scenario(self, scenario):
self.log(
"begin scenario {0}:{1}.{2} {3}".format(
world.config.marker,
self.get_scenario_feature(scenario).id,
scenario.id,
scenario.sentence,
)
)
def syslog_writer_after_each_scenario(self, scenario):
self.log(
"end scenario {0}:{1}.{2} {3}".format(
world.config.marker,
self.get_scenario_feature(scenario).id,
scenario.id,
scenario.sentence,
)
)
def syslog_writer_before_each_step(self, step):
self.log(
"begin step {0}:{1}.{2}.{3} {4}".format(
world.config.marker,
self.get_scenario_feature(step.parent).id,
step.parent.id,
step.id,
step.sentence,
)
)
def syslog_writer_after_each_step(self, step):
self.log(
"{0} step {1}:{2}.{3}.{4} {5}".format(
step.state,
world.config.marker,
self.get_scenario_feature(step.parent).id,
step.parent.id,
step.id,
step.sentence,
)
)
| true | true |
f7313bfac29687bff5e8a360d2fdc2e1ee3e5a5f | 1,447 | py | Python | setup_guide/migrations/0005_auto_20180327_1341.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 1 | 2019-01-18T03:50:46.000Z | 2019-01-18T03:50:46.000Z | setup_guide/migrations/0005_auto_20180327_1341.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 50 | 2018-01-24T18:04:08.000Z | 2019-01-03T03:30:30.000Z | setup_guide/migrations/0005_auto_20180327_1341.py | uktrade/invest | 15b84c511839b46e81608fca9762d2df3f6df16c | [
"MIT"
] | 2 | 2018-02-12T15:20:52.000Z | 2019-01-18T03:51:52.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-27 13:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Renames the Chinese translation fields from the bare ``zh`` suffix to
    the locale-specific ``zh_cn`` suffix on the setup guide landing page
    and setup guide page models.
    """

    dependencies = [
        ('setup_guide', '0004_auto_20180322_1443'),
    ]

    operations = [
        migrations.RenameField(
            model_name='setupguidelandingpage',
            old_name='heading_zh',
            new_name='heading_zh_cn',
        ),
        migrations.RenameField(
            model_name='setupguidelandingpage',
            old_name='lead_in_zh',
            new_name='lead_in_zh_cn',
        ),
        migrations.RenameField(
            model_name='setupguidelandingpage',
            old_name='sub_heading_zh',
            new_name='sub_heading_zh_cn',
        ),
        migrations.RenameField(
            model_name='setupguidepage',
            old_name='description_zh',
            new_name='description_zh_cn',
        ),
        migrations.RenameField(
            model_name='setupguidepage',
            old_name='heading_zh',
            new_name='heading_zh_cn',
        ),
        migrations.RenameField(
            model_name='setupguidepage',
            old_name='sub_heading_zh',
            new_name='sub_heading_zh_cn',
        ),
        migrations.RenameField(
            model_name='setupguidepage',
            old_name='subsections_zh',
            new_name='subsections_zh_cn',
        ),
    ]
| 28.372549 | 51 | 0.580511 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('setup_guide', '0004_auto_20180322_1443'),
]
operations = [
migrations.RenameField(
model_name='setupguidelandingpage',
old_name='heading_zh',
new_name='heading_zh_cn',
),
migrations.RenameField(
model_name='setupguidelandingpage',
old_name='lead_in_zh',
new_name='lead_in_zh_cn',
),
migrations.RenameField(
model_name='setupguidelandingpage',
old_name='sub_heading_zh',
new_name='sub_heading_zh_cn',
),
migrations.RenameField(
model_name='setupguidepage',
old_name='description_zh',
new_name='description_zh_cn',
),
migrations.RenameField(
model_name='setupguidepage',
old_name='heading_zh',
new_name='heading_zh_cn',
),
migrations.RenameField(
model_name='setupguidepage',
old_name='sub_heading_zh',
new_name='sub_heading_zh_cn',
),
migrations.RenameField(
model_name='setupguidepage',
old_name='subsections_zh',
new_name='subsections_zh_cn',
),
]
| true | true |
f7313c2994502b974b95d44f22df74068b470940 | 81,610 | py | Python | dask/array/routines.py | leogao2/dask | 4e5dfe7463028a39a90e026c7fb9220969093ab3 | [
"BSD-3-Clause"
] | null | null | null | dask/array/routines.py | leogao2/dask | 4e5dfe7463028a39a90e026c7fb9220969093ab3 | [
"BSD-3-Clause"
] | null | null | null | dask/array/routines.py | leogao2/dask | 4e5dfe7463028a39a90e026c7fb9220969093ab3 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import math
import warnings
from collections.abc import Iterable
from functools import partial, reduce, wraps
from numbers import Integral, Real
import numpy as np
from tlz import concat, interleave, sliding_window
from dask.array import chunk
from dask.array.core import (
Array,
asanyarray,
asarray,
blockwise,
broadcast_arrays,
broadcast_shapes,
broadcast_to,
concatenate,
elemwise,
from_array,
implements,
is_scalar_for_elemwise,
map_blocks,
stack,
tensordot_lookup,
)
from dask.array.creation import arange, diag, empty, indices, tri
from dask.array.einsumfuncs import einsum # noqa
from dask.array.numpy_compat import _numpy_120
from dask.array.reductions import reduction
from dask.array.ufunc import multiply, sqrt
from dask.array.utils import (
array_safe,
asarray_safe,
meta_from_array,
safe_wraps,
validate_axis,
)
from dask.array.wrap import ones
from dask.base import is_dask_collection, tokenize
from dask.core import flatten
from dask.delayed import Delayed, unpack_collections
from dask.highlevelgraph import HighLevelGraph
from dask.utils import apply, derived_from, funcname, is_arraylike, is_cupy_type
# save built-in for histogram functions which use range as a kwarg.
_range = range
@derived_from(np)
def array(x, dtype=None, ndmin=None, *, like=None):
    # Blocked variant of numpy.array: coerce to a dask array, pad leading
    # dimensions up to ``ndmin`` and cast to ``dtype`` when requested.
    if not _numpy_120 and like is not None:
        raise RuntimeError("The use of ``like`` required NumPy >= 1.20")
    x = asarray(x, like=like)
    # Pad leading singleton dimensions until at least ``ndmin`` dimensions.
    # ``x[None]`` (rather than ``x[None, :]``) also works for 0-d inputs,
    # where a trailing ``:`` would be one index too many.
    while ndmin is not None and x.ndim < ndmin:
        x = x[None]
    if dtype is not None and x.dtype != dtype:
        x = x.astype(dtype)
    return x
@derived_from(np)
def result_type(*args):
    # Scalars take part in promotion directly; arrays contribute their dtype.
    dtypes = [a if is_scalar_for_elemwise(a) else a.dtype for a in args]
    return np.result_type(*dtypes)
@derived_from(np)
def atleast_3d(*arys):
    # Promote every input to at least 3 dimensions, mirroring numpy's
    # placement of the new axes for 0-d, 1-d and 2-d inputs.
    results = []
    for arr in arys:
        arr = asanyarray(arr)
        if arr.ndim == 0:
            arr = arr[None, None, None]
        elif arr.ndim == 1:
            arr = arr[None, :, None]
        elif arr.ndim == 2:
            arr = arr[:, :, None]
        results.append(arr)
    # A single input yields a bare array rather than a one-element list.
    return results[0] if len(results) == 1 else results
@derived_from(np)
def atleast_2d(*arys):
    # Promote every input to at least 2 dimensions, mirroring numpy's
    # axis placement for 0-d and 1-d inputs.
    results = []
    for arr in arys:
        arr = asanyarray(arr)
        if arr.ndim == 0:
            arr = arr[None, None]
        elif arr.ndim == 1:
            arr = arr[None, :]
        results.append(arr)
    # A single input yields a bare array rather than a one-element list.
    return results[0] if len(results) == 1 else results
@derived_from(np)
def atleast_1d(*arys):
    # Promote every input to at least 1 dimension (0-d gets a leading axis).
    results = []
    for arr in arys:
        arr = asanyarray(arr)
        if arr.ndim == 0:
            arr = arr[None]
        results.append(arr)
    # A single input yields a bare array rather than a one-element list.
    return results[0] if len(results) == 1 else results
@derived_from(np)
def vstack(tup, allow_unknown_chunksizes=False):
    # numpy's vstack takes a sequence of arrays; a bare Array is a user error.
    if isinstance(tup, Array):
        raise NotImplementedError(
            "``vstack`` expects a sequence of arrays as the first argument"
        )
    # Promote to 2-d so stacking along rows (axis 0) is always well defined.
    arrays = tuple(atleast_2d(a) for a in tup)
    return concatenate(
        arrays, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes
    )
@derived_from(np)
def hstack(tup, allow_unknown_chunksizes=False):
    # numpy's hstack takes a sequence of arrays; a bare Array is a user error.
    if isinstance(tup, Array):
        raise NotImplementedError(
            "``hstack`` expects a sequence of arrays as the first argument"
        )
    # 1-d inputs are joined end-to-end (axis 0); anything higher-dimensional
    # is joined along columns (axis 1), matching numpy.hstack.
    axis = 0 if all(a.ndim == 1 for a in tup) else 1
    return concatenate(
        tup, axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes
    )
@derived_from(np)
def dstack(tup, allow_unknown_chunksizes=False):
    # numpy's dstack takes a sequence of arrays; a bare Array is a user error.
    if isinstance(tup, Array):
        raise NotImplementedError(
            "``dstack`` expects a sequence of arrays as the first argument"
        )
    # Promote to 3-d so stacking depth-wise (axis 2) is always well defined.
    arrays = tuple(atleast_3d(a) for a in tup)
    return concatenate(
        arrays, axis=2, allow_unknown_chunksizes=allow_unknown_chunksizes
    )
@derived_from(np)
def swapaxes(a, axis1, axis2):
    # Swapping an axis with itself is the identity.
    if axis1 == axis2:
        return a
    # Normalise negative axis numbers.
    if axis1 < 0:
        axis1 += a.ndim
    if axis2 < 0:
        axis2 += a.ndim
    # Output index order is the input order with the two axes exchanged.
    in_ind = list(range(a.ndim))
    out_ind = list(in_ind)
    out_ind[axis1], out_ind[axis2] = out_ind[axis2], out_ind[axis1]
    return blockwise(
        np.swapaxes, out_ind, a, in_ind, axis1=axis1, axis2=axis2, dtype=a.dtype
    )
@derived_from(np)
def transpose(a, axes=None):
    if axes:
        if len(axes) != a.ndim:
            raise ValueError("axes don't match array")
        # Normalise negative axis numbers.
        axes = tuple(d + a.ndim if d < 0 else d for d in axes)
    else:
        # Default: reverse the order of all axes, as numpy does.
        axes = tuple(range(a.ndim))[::-1]
    return blockwise(
        np.transpose, axes, a, tuple(range(a.ndim)), dtype=a.dtype, axes=axes
    )
def flip(m, axis=None):
    """
    Reverse element order along axis.

    Parameters
    ----------
    m : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes to reverse element order of. None will reverse all axes.

    Returns
    -------
    dask.array.Array
        The flipped array.
    """
    m = asanyarray(m)

    sl = m.ndim * [slice(None)]
    if axis is None:
        axis = range(m.ndim)
    if not isinstance(axis, Iterable):
        axis = (axis,)
    try:
        for ax in axis:
            # A step of -1 reverses the axis without moving any data.
            sl[ax] = slice(None, None, -1)
    except IndexError as e:
        raise ValueError(
            f"`axis` of {str(axis)} invalid for {str(m.ndim)}-D array"
        ) from e
    sl = tuple(sl)

    return m[sl]
@derived_from(np)
def flipud(m):
    # Reverse the rows (first axis).
    return flip(m, axis=0)
@derived_from(np)
def fliplr(m):
    # Reverse the columns (second axis).
    return flip(m, axis=1)
@derived_from(np)
def rot90(m, k=1, axes=(0, 1)):
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("len(axes) must be 2.")

    m = asanyarray(m)

    if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:
        raise ValueError("Axes must be different.")

    if axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim:
        raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.")

    # Rotations repeat every four quarter turns.
    k %= 4

    if k == 0:
        return m[:]
    if k == 2:
        # 180 degrees: flip both axes.
        return flip(flip(m, axes[0]), axes[1])

    # 90 / 270 degrees: one flip plus a transpose of the two axes.
    axes_list = list(range(0, m.ndim))
    (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]])

    if k == 1:
        return transpose(flip(m, axes[1]), axes_list)
    else:
        # k == 3
        return flip(transpose(m, axes_list), axes[1])
def _tensordot(a, b, axes, is_sparse):
    # Per-chunk tensordot.  Dispatch on the higher-priority operand so that
    # e.g. sparse or masked-array implementations can override numpy's.
    x = max([a, b], key=lambda x: x.__array_priority__)
    tensordot = tensordot_lookup.dispatch(type(x))
    x = tensordot(a, b, axes=axes)
    if is_sparse and len(axes[0]) == 1:
        # Sparse single-axis contraction is handled with concatenate=True
        # in the caller, so the contracted axis is already gone.
        return x
    else:
        # Re-insert the contracted dimensions as size-1 axes so the
        # blockwise output keeps every index; they are summed away later.
        ind = [slice(None, None)] * x.ndim
        for a in sorted(axes[0]):
            ind.insert(a, None)
        x = x[tuple(ind)]
        return x
def _tensordot_is_sparse(x):
is_sparse = "sparse" in str(type(x._meta))
if is_sparse:
# exclude pydata sparse arrays, no workaround required for these in tensordot
is_sparse = "sparse._coo.core.COO" not in str(type(x._meta))
return is_sparse
@derived_from(np)
def tensordot(lhs, rhs, axes=2):
    if not isinstance(lhs, Array):
        lhs = from_array(lhs)
    if not isinstance(rhs, Array):
        rhs = from_array(rhs)

    if isinstance(axes, Iterable):
        left_axes, right_axes = axes
    else:
        # An integer ``axes`` means: contract the last ``axes`` dims of lhs
        # with the first ``axes`` dims of rhs (numpy convention).
        left_axes = tuple(range(lhs.ndim - axes, lhs.ndim))
        right_axes = tuple(range(0, axes))
    if isinstance(left_axes, Integral):
        left_axes = (left_axes,)
    if isinstance(right_axes, Integral):
        right_axes = (right_axes,)
    if isinstance(left_axes, list):
        left_axes = tuple(left_axes)
    if isinstance(right_axes, list):
        right_axes = tuple(right_axes)
    # Sparse chunks with a single contraction axis can be contracted directly
    # inside blockwise (with concatenation); otherwise keep the contracted
    # axes as size-1 dimensions and sum them afterwards (see _tensordot).
    is_sparse = _tensordot_is_sparse(lhs) or _tensordot_is_sparse(rhs)
    if is_sparse and len(left_axes) == 1:
        concatenate = True
    else:
        concatenate = False

    dt = np.promote_types(lhs.dtype, rhs.dtype)

    # Assign distinct blockwise indices to every lhs/rhs dimension, then
    # alias each contracted rhs index to its lhs partner.
    left_index = list(range(lhs.ndim))
    right_index = list(range(lhs.ndim, lhs.ndim + rhs.ndim))
    out_index = left_index + right_index
    adjust_chunks = {}

    for l, r in zip(left_axes, right_axes):
        out_index.remove(right_index[r])
        right_index[r] = left_index[l]
        if concatenate:
            out_index.remove(left_index[l])
        else:
            # _tensordot leaves a size-1 axis in place of each contraction.
            adjust_chunks[left_index[l]] = lambda c: 1

    intermediate = blockwise(
        _tensordot,
        out_index,
        lhs,
        left_index,
        rhs,
        right_index,
        dtype=dt,
        concatenate=concatenate,
        adjust_chunks=adjust_chunks,
        axes=(left_axes, right_axes),
        is_sparse=is_sparse,
    )

    if concatenate:
        return intermediate
    else:
        # Collapse the size-1 contraction axes left by _tensordot.
        return intermediate.sum(axis=left_axes)
@derived_from(np)
def dot(a, b):
    # numpy.dot contracts the last axis of ``a`` with the
    # second-to-last axis of ``b``.
    left_axes = (a.ndim - 1,)
    right_axes = (b.ndim - 2,)
    return tensordot(a, b, axes=(left_axes, right_axes))
@derived_from(np)
def vdot(a, b):
    # Flatten both operands and take the conjugate dot product.
    a_flat = a.conj().ravel()
    b_flat = b.ravel()
    return dot(a_flat, b_flat)
def _chunk_sum(a, axis=None, dtype=None, keepdims=None):
# Caution: this is not your conventional array-sum: due
# to the special nature of the preceding blockwise con-
# traction, each chunk is expected to have exactly the
# same shape, with a size of 1 for the dimension given
# by `axis` (the reduction axis). This makes mere ele-
# ment-wise addition of the arrays possible. Besides,
# the output can be merely squeezed to lose the `axis`-
# dimension when keepdims = False
if type(a) is list:
out = reduce(partial(np.add, dtype=dtype), a)
else:
out = a
if keepdims:
return out
else:
return out.squeeze(axis[0])
def _sum_wo_cat(a, axis=None, dtype=None):
    # Sum along ``axis`` without concatenating chunks first (see _chunk_sum).
    if dtype is None:
        # Mirror numpy's accumulator dtype promotion for sums.
        dtype = getattr(np.zeros(1, dtype=a.dtype).sum(), "dtype", object)

    if a.shape[axis] == 1:
        # Nothing to reduce: just drop the unit axis.
        return a.squeeze(axis)

    return reduction(
        a, _chunk_sum, _chunk_sum, axis=axis, dtype=dtype, concatenate=False
    )
def _matmul(a, b):
    # Per-chunk matmul that re-inserts the contracted axis as size 1.
    xp = np

    if is_cupy_type(a):
        # This branch appears to be unnecessary since cupy
        # version 9.0. See the following link:
        # https://github.com/dask/dask/pull/8423#discussion_r768291271
        # But it remains here for backward-compatibility.
        # Consider removing it in a future version of dask.
        import cupy

        xp = cupy

    chunk = xp.matmul(a, b)
    # Since we have performed the contraction via xp.matmul
    # but blockwise expects all dimensions back (including
    # the contraction-axis in the 2nd-to-last position of
    # the output), we must then put it back in the expected
    # position ourselves:
    return chunk[..., xp.newaxis, :]
@derived_from(np)
def matmul(a, b):
    a = asanyarray(a)
    b = asanyarray(b)

    if a.ndim == 0 or b.ndim == 0:
        raise ValueError("`matmul` does not support scalars.")

    # Promote 1-d operands to matrices (numpy matmul semantics); the added
    # unit axes are squeezed away again at the end.
    a_is_1d = False
    if a.ndim == 1:
        a_is_1d = True
        a = a[np.newaxis, :]

    b_is_1d = False
    if b.ndim == 1:
        b_is_1d = True
        b = b[:, np.newaxis]

    # Pad the lower-dimensional operand with leading unit axes so both
    # stacks of matrices have the same rank.
    if a.ndim < b.ndim:
        a = a[(b.ndim - a.ndim) * (np.newaxis,)]
    elif a.ndim > b.ndim:
        b = b[(a.ndim - b.ndim) * (np.newaxis,)]

    # out_ind includes all dimensions to prevent contraction
    # in the blockwise below. We set the last two dimensions
    # of the output to the contraction axis and the 2nd
    # (last) dimension of b in that order
    out_ind = tuple(range(a.ndim + 1))
    # lhs_ind includes `a`/LHS dimensions
    lhs_ind = tuple(range(a.ndim))
    # on `b`/RHS everything above 2nd dimension, is the same
    # as `a`, -2 dimension is "contracted" with the last dimension
    # of `a`, last dimension of `b` is `b` specific
    rhs_ind = tuple(range(a.ndim - 2)) + (lhs_ind[-1], a.ndim)

    out = blockwise(
        _matmul,
        out_ind,
        a,
        lhs_ind,
        b,
        rhs_ind,
        adjust_chunks={lhs_ind[-1]: 1},
        dtype=result_type(a, b),
        concatenate=False,
    )

    # Because contraction + concatenate in blockwise leads to high
    # memory footprints, we want to avoid them. Instead we will perform
    # blockwise (without contraction) followed by reduction. More about
    # this issue: https://github.com/dask/dask/issues/6874
    # We will also perform the reduction without concatenation
    out = _sum_wo_cat(out, axis=-2)

    if a_is_1d:
        out = out.squeeze(-2)
    if b_is_1d:
        out = out.squeeze(-1)

    return out
@derived_from(np)
def outer(a, b):
    # The outer product is defined on the flattened operands.
    a_flat = a.flatten()
    b_flat = b.flatten()
    # Determine the result dtype from zero-valued scalars of each input type.
    dtype = np.outer(a_flat.dtype.type(), b_flat.dtype.type()).dtype
    return blockwise(np.outer, "ij", a_flat, "i", b_flat, "j", dtype=dtype)
def _inner_apply_along_axis(arr, func1d, func1d_axis, func1d_args, func1d_kwargs):
return np.apply_along_axis(func1d, func1d_axis, arr, *func1d_args, **func1d_kwargs)
@derived_from(np)
def apply_along_axis(func1d, axis, arr, *args, dtype=None, shape=None, **kwargs):
    """
    This is a blocked variant of :func:`numpy.apply_along_axis` implemented via
    :func:`dask.array.map_blocks`

    Notes
    -----
    If either of `dtype` or `shape` are not provided, Dask attempts to
    determine them by calling `func1d` on a dummy array. This may produce
    incorrect values for `dtype` or `shape`, so we recommend providing them.
    """
    arr = asarray(arr)

    # Verify that axis is valid and throw an error otherwise
    axis = len(arr.shape[:axis])

    # If necessary, infer dtype and shape of the output of func1d by calling
    # it on a one-element dummy array.
    if shape is None or dtype is None:
        test_data = np.ones((1,), dtype=arr.dtype)
        test_result = np.array(func1d(test_data, *args, **kwargs))
        if shape is None:
            shape = test_result.shape
        if dtype is None:
            dtype = test_result.dtype

    # Rechunk so that func1d is applied over the full axis.
    arr = arr.rechunk(
        arr.chunks[:axis] + (arr.shape[axis : axis + 1],) + arr.chunks[axis + 1 :]
    )

    # Map func1d over the data to get the result
    # Adds other axes as needed.
    result = arr.map_blocks(
        _inner_apply_along_axis,
        name=funcname(func1d) + "-along-axis",
        dtype=dtype,
        chunks=(arr.chunks[:axis] + shape + arr.chunks[axis + 1 :]),
        drop_axis=axis,
        new_axis=list(range(axis, axis + len(shape), 1)),
        func1d=func1d,
        func1d_axis=axis,
        func1d_args=args,
        func1d_kwargs=kwargs,
    )

    return result
@derived_from(np)
def apply_over_axes(func, a, axes):
    # Validate arguments
    a = asarray(a)
    try:
        axes = tuple(axes)
    except TypeError:
        # A single integer axis is allowed.
        axes = (axes,)

    sl = a.ndim * (slice(None),)

    # Compute using `apply_along_axis`.
    result = a
    for i in axes:
        result = apply_along_axis(func, i, result, 0)

        # Restore original dimensionality or error.
        if result.ndim == (a.ndim - 1):
            # func reduced the axis away: re-insert it as a length-1 axis.
            result = result[sl[:i] + (None,)]
        elif result.ndim != a.ndim:
            raise ValueError(
                "func must either preserve dimensionality of the input"
                " or reduce it by one."
            )

    return result
@derived_from(np)
def ptp(a, axis=None):
    # Peak-to-peak: the spread between the largest and smallest values.
    highest = a.max(axis=axis)
    lowest = a.min(axis=axis)
    return highest - lowest
@derived_from(np)
def diff(a, n=1, axis=-1, prepend=None, append=None):
    a = asarray(a)
    n = int(n)
    axis = int(axis)

    if n == 0:
        return a
    if n < 0:
        raise ValueError("order must be non-negative but got %d" % n)

    combined = []
    if prepend is not None:
        prepend = asarray_safe(prepend, like=meta_from_array(a))
        if prepend.ndim == 0:
            # Broadcast a scalar to a length-1 slice along ``axis``.
            shape = list(a.shape)
            shape[axis] = 1
            prepend = broadcast_to(prepend, tuple(shape))
        combined.append(prepend)

    combined.append(a)

    if append is not None:
        append = asarray_safe(append, like=meta_from_array(a))
        if append.ndim == 0:
            shape = list(a.shape)
            shape[axis] = 1
            # NOTE(review): uses np.broadcast_to here while the prepend
            # branch uses dask's broadcast_to — presumably both work on the
            # numpy-backed meta, but confirm the asymmetry is intentional.
            append = np.broadcast_to(append, tuple(shape))
        combined.append(append)

    if len(combined) > 1:
        a = concatenate(combined, axis)

    # Slices selecting a[1:] and a[:-1] along ``axis``.
    sl_1 = a.ndim * [slice(None)]
    sl_2 = a.ndim * [slice(None)]

    sl_1[axis] = slice(1, None)
    sl_2[axis] = slice(None, -1)

    sl_1 = tuple(sl_1)
    sl_2 = tuple(sl_2)

    # Apply the first difference n times.
    r = a
    for i in range(n):
        r = r[sl_1] - r[sl_2]

    return r
@derived_from(np)
def ediff1d(ary, to_end=None, to_begin=None):
    ary = asarray(ary)

    # Consecutive differences of the flattened input.
    flat = ary.flatten()
    parts = [flat[1:] - flat[:-1]]
    # Optional values are prepended/appended, flattened, to the result.
    if to_begin is not None:
        parts.insert(0, asarray(to_begin).flatten())
    if to_end is not None:
        parts.append(asarray(to_end).flatten())
    return concatenate(parts)
def _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):
"""
x: nd-array
array of one block
coord: 1d-array or scalar
coordinate along which the gradient is computed.
axis: int
axis along which the gradient is computed
array_locs:
actual location along axis. None if coordinate is scalar
grad_kwargs:
keyword to be passed to np.gradient
"""
block_loc = block_id[axis]
if array_locs is not None:
coord = coord[array_locs[0][block_loc] : array_locs[1][block_loc]]
grad = np.gradient(x, coord, axis=axis, **grad_kwargs)
return grad
@derived_from(np)
def gradient(f, *varargs, axis=None, **kwargs):
    f = asarray(f)

    kwargs["edge_order"] = math.ceil(kwargs.get("edge_order", 1))
    if kwargs["edge_order"] > 2:
        raise ValueError("edge_order must be less than or equal to 2.")

    # A single integer axis returns a bare array instead of a list.
    drop_result_list = False
    if axis is None:
        axis = tuple(range(f.ndim))
    elif isinstance(axis, Integral):
        drop_result_list = True
        axis = (axis,)

    axis = validate_axis(axis, f.ndim)

    if len(axis) != len(set(axis)):
        raise ValueError("duplicate axes not allowed")

    axis = tuple(ax % f.ndim for ax in axis)

    # Normalise spacing: default 1, or one spacing per requested axis.
    if varargs == ():
        varargs = (1,)
    if len(varargs) == 1:
        varargs = len(axis) * varargs
    if len(varargs) != len(axis):
        raise TypeError(
            "Spacing must either be a single scalar, or a scalar / 1d-array per axis"
        )

    # Promote bool/integer and narrow float inputs to float, as numpy does.
    if issubclass(f.dtype.type, (np.bool8, Integral)):
        f = f.astype(float)
    elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:
        f = f.astype(float)

    results = []
    for i, ax in enumerate(axis):
        # Each chunk must hold enough points for the edge stencil.
        for c in f.chunks[ax]:
            if np.min(c) < kwargs["edge_order"] + 1:
                raise ValueError(
                    "Chunk size must be larger than edge_order + 1. "
                    "Minimum chunk for axis {} is {}. Rechunk to "
                    "proceed.".format(ax, np.min(c))
                )

        if np.isscalar(varargs[i]):
            array_locs = None
        else:
            if isinstance(varargs[i], Array):
                raise NotImplementedError("dask array coordinated is not supported.")
            # coordinate position for each block taking overlap into account
            chunk = np.array(f.chunks[ax])
            array_loc_stop = np.cumsum(chunk) + 1
            array_loc_start = array_loc_stop - chunk - 2
            array_loc_stop[-1] -= 1
            array_loc_start[0] = 0
            array_locs = (array_loc_start, array_loc_stop)

        results.append(
            f.map_overlap(
                _gradient_kernel,
                dtype=f.dtype,
                depth={j: 1 if j == ax else 0 for j in range(f.ndim)},
                boundary="none",
                coord=varargs[i],
                axis=ax,
                array_locs=array_locs,
                grad_kwargs=kwargs,
            )
        )

    if drop_result_list:
        results = results[0]

    return results
def _bincount_agg(bincounts, dtype, **kwargs):
if not isinstance(bincounts, list):
return bincounts
n = max(map(len, bincounts))
out = np.zeros_like(bincounts[0], shape=n, dtype=dtype)
for b in bincounts:
out[: len(b)] += b
return out
@derived_from(np)
def bincount(x, weights=None, minlength=0, split_every=None):
    if x.ndim != 1:
        raise ValueError("Input array must be one dimensional. Try using x.ravel()")
    if weights is not None:
        if weights.chunks != x.chunks:
            raise ValueError("Chunks of input array x and weights must match.")

    token = tokenize(x, weights, minlength)
    args = [x, "i"]
    # Probe numpy with a tiny input to obtain a matching meta/dtype.
    if weights is not None:
        meta = array_safe(np.bincount([1], weights=[1]), like=meta_from_array(x))
        args.extend([weights, "i"])
    else:
        meta = array_safe(np.bincount([]), like=meta_from_array(x))

    # With no minlength, the per-chunk output lengths are unknown (nan).
    if minlength == 0:
        output_size = (np.nan,)
    else:
        output_size = (minlength,)

    chunked_counts = blockwise(
        partial(np.bincount, minlength=minlength), "i", *args, token=token, meta=meta
    )
    chunked_counts._chunks = (
        output_size * len(chunked_counts.chunks[0]),
        *chunked_counts.chunks[1:],
    )

    from dask.array.reductions import _tree_reduce

    # Tree-reduce the per-chunk counts with the padding-aware aggregator.
    output = _tree_reduce(
        chunked_counts,
        aggregate=partial(_bincount_agg, dtype=meta.dtype),
        axis=(0,),
        keepdims=True,
        dtype=meta.dtype,
        split_every=split_every,
        concatenate=False,
    )
    output._chunks = (output_size, *chunked_counts.chunks[1:])
    output._meta = meta
    return output
@derived_from(np)
def digitize(a, bins, right=False):
    bins = asarray_safe(bins, like=meta_from_array(a))
    # Probe numpy with a one-element input to determine the output dtype.
    dtype = np.digitize(asarray_safe([0], like=bins), bins, right=False).dtype
    return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)
def _searchsorted_block(x, y, side):
res = np.searchsorted(x, y, side=side)
# 0 is only correct for the first block of a, but blockwise doesn't have a way
# of telling which block is being operated on (unlike map_blocks),
# so set all 0 values to a special value and set back at the end of searchsorted
res[res == 0] = -1
return res[np.newaxis, :]
@derived_from(np)
def searchsorted(a, v, side="left", sorter=None):
    if a.ndim != 1:
        raise ValueError("Input array a must be one dimensional")

    if sorter is not None:
        raise NotImplementedError(
            "da.searchsorted with a sorter argument is not supported"
        )

    # call np.searchsorted for each pair of blocks in a and v
    meta = np.searchsorted(a._meta, v._meta)
    out = blockwise(
        _searchsorted_block,
        list(range(v.ndim + 1)),
        a,
        [0],
        v,
        list(range(1, v.ndim + 1)),
        side,
        None,
        meta=meta,
        adjust_chunks={0: 1},  # one row for each block in a
    )

    # add offsets to take account of the position of each block within the array a
    a_chunk_sizes = array_safe((0, *a.chunks[0]), like=meta_from_array(a))
    a_chunk_offsets = np.cumsum(a_chunk_sizes)[:-1]
    a_chunk_offsets = a_chunk_offsets[(Ellipsis,) + v.ndim * (np.newaxis,)]
    a_offsets = asarray(a_chunk_offsets, chunks=1)
    # -1 entries (the flagged zeros) keep their sentinel; everything else is
    # shifted by its block's starting position.
    out = where(out < 0, out, out + a_offsets)

    # combine the results from each block (of a)
    out = out.max(axis=0)

    # fix up any -1 values
    out[out == -1] = 0

    return out
# TODO: dask linspace doesn't support delayed values
def _linspace_from_delayed(start, stop, num=50):
    # Build a linspace whose endpoints/length may be dask Delayed objects.
    linspace_name = "linspace-" + tokenize(start, stop, num)
    (start_ref, stop_ref, num_ref), deps = unpack_collections([start, stop, num])
    if len(deps) == 0:
        # All inputs are concrete: fall back to plain numpy.
        return np.linspace(start, stop, num=num)

    linspace_dsk = {(linspace_name, 0): (np.linspace, start_ref, stop_ref, num_ref)}
    linspace_graph = HighLevelGraph.from_collections(
        linspace_name, linspace_dsk, dependencies=deps
    )

    # The length is unknown until a delayed ``num`` is computed.
    chunks = ((np.nan,),) if is_dask_collection(num) else ((num,),)
    return Array(linspace_graph, linspace_name, chunks, dtype=float)
def _block_hist(x, bins, range=None, weights=None):
return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
    """
    Blocked variant of :func:`numpy.histogram`.

    Parameters
    ----------
    a : dask.array.Array
        Input data; the histogram is computed over the flattened
        array. If the ``weights`` argument is used, the chunks of
        ``a`` are accessed to check chunking compatibility between
        ``a`` and ``weights``. If ``weights`` is ``None``, a
        :py:class:`dask.dataframe.Series` object can be passed as
        input data.
    bins : int or sequence of scalars, optional
        Either an iterable specifying the ``bins`` or the number of ``bins``
        and a ``range`` argument is required as computing ``min`` and ``max``
        over blocked arrays is an expensive operation that must be performed
        explicitly.

        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines a monotonically increasing array of bin edges,
        including the rightmost edge, allowing for non-uniform bin widths.
    range : (float, float), optional
        The lower and upper range of the bins.  If not provided, range
        is simply ``(a.min(), a.max())``.  Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    normed : bool, optional
        This is equivalent to the ``density`` argument, but produces incorrect
        results for unequal bin widths. It should not be used.
    weights : dask.array.Array, optional
        A dask.array.Array of weights, of the same block structure as ``a``.  Each value in
        ``a`` only contributes its associated weight towards the bin count
        (instead of 1). If ``density`` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.

        Overrides the ``normed`` keyword if given.
        If ``density`` is True, ``bins`` cannot be a single-number delayed
        value. It must be a concrete number, or a (possibly-delayed)
        array/sequence of the bin edges.

    Returns
    -------
    hist : dask Array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : dask Array of dtype float
        Return the bin edges ``(length(hist)+1)``.

    Examples
    --------
    Using number of bins and range:

    >>> import dask.array as da
    >>> import numpy as np
    >>> x = da.from_array(np.arange(10000), chunks=10)
    >>> h, bins = da.histogram(x, bins=10, range=[0, 10000])
    >>> bins
    array([    0.,  1000.,  2000.,  3000.,  4000.,  5000.,  6000.,  7000.,
            8000.,  9000., 10000.])
    >>> h.compute()
    array([1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000])

    Explicitly specifying the bins:

    >>> h, bins = da.histogram(x, bins=np.array([0, 5000, 10000]))
    >>> bins
    array([    0,  5000, 10000])
    >>> h.compute()
    array([5000, 5000])
    """
    if isinstance(bins, Array):
        scalar_bins = bins.ndim == 0
        # ^ `np.ndim` is not implemented by Dask array.
    elif isinstance(bins, Delayed):
        scalar_bins = bins._length is None or bins._length == 1
    else:
        scalar_bins = np.ndim(bins) == 0

    if bins is None or (scalar_bins and range is None):
        raise ValueError(
            "dask.array.histogram requires either specifying "
            "bins as an iterable or specifying both a range and "
            "the number of bins"
        )

    if weights is not None and weights.chunks != a.chunks:
        raise ValueError("Input array and weights must have the same chunked structure")

    if normed is not False:
        raise ValueError(
            "The normed= keyword argument has been deprecated. "
            "Please use density instead. "
            "See the numpy.histogram docstring for more information."
        )

    if density and scalar_bins and isinstance(bins, (Array, Delayed)):
        raise NotImplementedError(
            "When `density` is True, `bins` cannot be a scalar Dask object. "
            "It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."
        )

    # Check each argument (not just ``bins``) for unsupported dask collections.
    for argname, val in [("bins", bins), ("range", range), ("weights", weights)]:
        if not isinstance(val, (Array, Delayed)) and is_dask_collection(val):
            raise TypeError(
                "Dask types besides Array and Delayed are not supported "
                "for `histogram`. For argument `{}`, got: {!r}".format(argname, val)
            )

    if range is not None:
        try:
            if len(range) != 2:
                raise ValueError(
                    f"range must be a sequence or array of length 2, but got {len(range)} items"
                )
            if isinstance(range, (Array, np.ndarray)) and range.shape != (2,):
                raise ValueError(
                    f"range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"
                )
        except TypeError:
            raise TypeError(
                f"Expected a sequence or array for range, not {range}"
            ) from None

    token = tokenize(a, bins, range, weights, density)
    name = "histogram-sum-" + token

    if scalar_bins:
        bins = _linspace_from_delayed(range[0], range[1], bins + 1)
        # ^ NOTE `range[1]` is safe because of the above check, and the initial check
        # that range must not be None if `scalar_bins`
    else:
        if not isinstance(bins, (Array, np.ndarray)):
            bins = asarray(bins)
        if bins.ndim != 1:
            raise ValueError(
                f"bins must be a 1-dimensional array or sequence, got shape {bins.shape}"
            )

    (bins_ref, range_ref), deps = unpack_collections([bins, range])

    # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk
    if weights is None:
        dsk = {
            (name, i, 0): (_block_hist, k, bins_ref, range_ref)
            for i, k in enumerate(flatten(a.__dask_keys__()))
        }
        dtype = np.histogram([])[0].dtype
    else:
        a_keys = flatten(a.__dask_keys__())
        w_keys = flatten(weights.__dask_keys__())
        dsk = {
            (name, i, 0): (_block_hist, k, bins_ref, range_ref, w)
            for i, (k, w) in enumerate(zip(a_keys, w_keys))
        }
        dtype = weights.dtype

    deps = (a,) + deps
    if weights is not None:
        deps += (weights,)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)

    # Turn graph into a 2D Array of shape (nchunks, nbins)
    nchunks = len(list(flatten(a.__dask_keys__())))
    nbins = bins.size - 1  # since `bins` is 1D
    chunks = ((1,) * nchunks, (nbins,))
    mapped = Array(graph, name, chunks, dtype=dtype)

    # Sum over chunks to get the final histogram
    n = mapped.sum(axis=0)

    # We need to replicate normed and density options from numpy
    if density is not None:
        if density:
            db = asarray(np.diff(bins).astype(float), chunks=n.chunks)
            return n / db / n.sum(), bins
        else:
            return n, bins
    else:
        return n, bins
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, density=None):
    """Blocked variant of :func:`numpy.histogram2d`.

    A two dimensional histogram is a special case of
    :py:func:`histogramdd`; this function simply forwards to it with
    the sample given as the pair ``(x, y)`` and unpacks the two edge
    arrays for the caller.

    Parameters
    ----------
    x : dask.array.Array
        An array containing the `x`-coordinates of the points to be
        histogrammed.
    y : dask.array.Array
        An array containing the `y`-coordinates of the points to be
        histogrammed.
    bins : sequence of arrays describing bin edges, int, or sequence of ints
        The bin specification. See the `bins` argument description for
        :py:func:`histogramdd` for a complete description of all
        possible bin configurations (this function is a 2D specific
        version of histogramdd).
    range : tuple of pairs, optional.
        The leftmost and rightmost edges of the bins along each
        dimension when integers are passed to `bins`; of the form:
        ((xmin, xmax), (ymin, ymax)).
    normed : bool, optional
        An alias for the density argument that behaves identically. To
        avoid confusion with the broken argument in the `histogram`
        function, `density` should be preferred.
    weights : dask.array.Array, optional
        An array of values weighing each sample in the input data. The
        chunks of the weights must be identical to the chunking along
        the 0th (row) axis of the data sample.
    density : bool, optional
        If False (the default) return the number of samples in each
        bin. If True, the returned array represents the probability
        density function at each bin.

    Returns
    -------
    dask.array.Array
        The values of the histogram.
    dask.array.Array
        The edges along the `x`-dimension.
    dask.array.Array
        The edges along the `y`-dimension.

    See Also
    --------
    histogram
    histogramdd

    Examples
    --------
    >>> import dask.array as da
    >>> x = da.array([2, 4, 2, 4, 2, 4])
    >>> y = da.array([2, 2, 4, 4, 2, 4])
    >>> bins = 2
    >>> range = ((0, 6), (0, 6))
    >>> h, xedges, yedges = da.histogram2d(x, y, bins=bins, range=range)
    >>> h
    dask.array<sum-aggregate, shape=(2, 2), dtype=float64, chunksize=(2, 2), chunktype=numpy.ndarray>
    >>> xedges
    dask.array<array, shape=(3,), dtype=float64, chunksize=(3,), chunktype=numpy.ndarray>
    >>> h.compute()
    array([[2., 1.],
           [1., 2.]])
    """
    h, edges = histogramdd(
        (x, y),
        bins=bins,
        range=range,
        normed=normed,
        weights=weights,
        density=density,
    )
    xedges, yedges = edges[0], edges[1]
    return h, xedges, yedges
def _block_histogramdd_rect(sample, bins, range, weights):
"""Call numpy.histogramdd for a blocked/chunked calculation.
Slurps the result into an additional outer axis; this new axis
will be used to stack chunked calls of the numpy function and add
them together later.
Returns
-------
:py:object:`np.ndarray`
NumPy array with an additional outer dimension.
"""
return np.histogramdd(sample, bins, range=range, weights=weights)[0:1]
def _block_histogramdd_multiarg(*args):
"""Call numpy.histogramdd for a multi argument blocked/chunked calculation.
Slurps the result into an additional outer axis; this new axis
will be used to stack chunked calls of the numpy function and add
them together later.
The last three arguments _must be_ (bins, range, weights).
The difference between this function and
_block_histogramdd_rect is that here we expect the sample
to be composed of multiple arguments (multiple 1D arrays, each one
representing a coordinate), while _block_histogramdd_rect
expects a single rectangular (2D array where columns are
coordinates) sample.
"""
bins, range, weights = args[-3:]
sample = args[:-3]
return np.histogramdd(sample, bins=bins, range=range, weights=weights)[0:1]
def histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):
    """Blocked variant of :func:`numpy.histogramdd`.

    Chunking of the input data (``sample``) is only allowed along the
    0th (row) axis (the axis corresponding to the total number of
    samples). Data chunked along the 1st axis (column) axis is not
    compatible with this function. If weights are used, they must be
    chunked along the 0th axis identically to the input sample.

    An example setup for a three dimensional histogram, where the
    sample shape is ``(8, 3)`` and weights are shape ``(8,)``, sample
    chunks would be ``((4, 4), (3,))`` and the weights chunks would be
    ``((4, 4),)`` a table of the structure:

    +-------+-----------------------+-----------+
    |       |      sample (8 x 3)   |  weights  |
    +=======+=====+=====+=====+=====+=====+=====+
    | chunk | row | `x` | `y` | `z` | row | `w` |
    +-------+-----+-----+-----+-----+-----+-----+
    |       |   0 |   5 |   6 |   6 |   0 | 0.5 |
    |       +-----+-----+-----+-----+-----+-----+
    |       |   1 |   8 |   9 |   2 |   1 | 0.8 |
    |   0   +-----+-----+-----+-----+-----+-----+
    |       |   2 |   3 |   3 |   1 |   2 | 0.3 |
    |       +-----+-----+-----+-----+-----+-----+
    |       |   3 |   2 |   5 |   6 |   3 | 0.7 |
    +-------+-----+-----+-----+-----+-----+-----+
    |       |   4 |   3 |   1 |   1 |   4 | 0.3 |
    |       +-----+-----+-----+-----+-----+-----+
    |       |   5 |   3 |   2 |   9 |   5 | 1.3 |
    |   1   +-----+-----+-----+-----+-----+-----+
    |       |   6 |   8 |   1 |   5 |   6 | 0.8 |
    |       +-----+-----+-----+-----+-----+-----+
    |       |   7 |   3 |   5 |   3 |   7 | 0.7 |
    +-------+-----+-----+-----+-----+-----+-----+

    If the sample 0th dimension and weight 0th (row) dimension are
    chunked differently, a ``ValueError`` will be raised. If
    coordinate groupings ((x, y, z) trios) are separated by a chunk
    boundry, then a ``ValueError`` will be raised. We suggest that you
    rechunk your data if it is of that form.

    The chunks property of the data (and optional weights) are used to
    check for compatibility with the blocked algorithm (as described
    above); therefore, you must call `to_dask_array` on a collection
    from ``dask.dataframe``, i.e. :class:`dask.dataframe.Series` or
    :class:`dask.dataframe.DataFrame`.

    The function is also compatible with `x`, `y`, and `z` being
    individual 1D arrays with equal chunking. In that case, the data
    should be passed as a tuple: ``histogramdd((x, y, z), ...)``

    Parameters
    ----------
    sample : dask.array.Array (N, D) or sequence of dask.array.Array
        Multidimensional data to be histogrammed.
        Note the unusual interpretation of a sample when it is a
        sequence of dask Arrays:

        * When a (N, D) dask Array, each row is an entry in the sample
          (coordinate in D dimensional space).
        * When a sequence of dask Arrays, each element in the sequence
          is the array of values for a single coordinate.
    bins : sequence of arrays describing bin edges, int, or sequence of ints
        The bin specification.
        The possible binning configurations are:

        * A sequence of arrays describing the monotonically increasing
          bin edges along each dimension.
        * A single int describing the total number of bins that will
          be used in each dimension (this requires the ``range``
          argument to be defined).
        * A sequence of ints describing the total number of bins to be
          used in each dimension (this requires the ``range`` argument
          to be defined).

        When bins are described by arrays, the rightmost edge is
        included. Bins described by arrays also allows for non-uniform
        bin widths.
    range : sequence of pairs, optional
        A sequence of length D, each a (min, max) tuple giving the
        outer bin edges to be used if the edges are not given
        explicitly in `bins`. If defined, this argument is required to
        have an entry for each dimension. Unlike
        :func:`numpy.histogramdd`, if `bins` does not define bin
        edges, this argument is required (this function will not
        automatically use the min and max of of the value in a given
        dimension because the input data may be lazy in dask).
    normed : bool, optional
        An alias for the density argument that behaves identically. To
        avoid confusion with the broken argument to `histogram`,
        `density` should be preferred.
    weights : dask.array.Array, optional
        An array of values weighing each sample in the input data. The
        chunks of the weights must be identical to the chunking along
        the 0th (row) axis of the data sample.
    density : bool, optional
        If ``False`` (default), the returned array represents the
        number of samples in each bin. If ``True``, the returned array
        represents the probability density function at each bin.

    See Also
    --------
    histogram

    Returns
    -------
    dask.array.Array
        The values of the histogram.
    list(dask.array.Array)
        Sequence of arrays representing the bin edges along each
        dimension.

    Examples
    --------
    Computing the histogram in 5 blocks using different bin edges
    along each dimension:

    >>> import dask.array as da
    >>> x = da.random.uniform(0, 1, size=(1000, 3), chunks=(200, 3))
    >>> edges = [
    ...     np.linspace(0, 1, 5), # 4 bins in 1st dim
    ...     np.linspace(0, 1, 6), # 5 in the 2nd
    ...     np.linspace(0, 1, 4), # 3 in the 3rd
    ... ]
    >>> h, edges = da.histogramdd(x, bins=edges)
    >>> result = h.compute()
    >>> result.shape
    (4, 5, 3)

    Defining the bins by total number and their ranges, along with
    using weights:

    >>> bins = (4, 5, 3)
    >>> ranges = ((0, 1),) * 3  # expands to ((0, 1), (0, 1), (0, 1))
    >>> w = da.random.uniform(0, 1, size=(1000,), chunks=x.chunksize[0])
    >>> h, edges = da.histogramdd(x, bins=bins, range=ranges, weights=w)
    >>> np.isclose(h.sum().compute(), w.sum().compute())
    True

    Using a sequence of 1D arrays as the input:

    >>> x = da.array([2, 4, 2, 4, 2, 4])
    >>> y = da.array([2, 2, 4, 4, 2, 4])
    >>> z = da.array([4, 2, 4, 2, 4, 2])
    >>> bins = ([0, 3, 6],) * 3
    >>> h, edges = da.histogramdd((x, y, z), bins)
    >>> h
    dask.array<sum-aggregate, shape=(2, 2, 2), dtype=float64, chunksize=(2, 2, 2), chunktype=numpy.ndarray>
    >>> edges[0]
    dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
    >>> h.compute()
    array([[[0., 2.],
            [0., 1.]],
    <BLANKLINE>
           [[1., 0.],
            [2., 0.]]])
    >>> edges[0].compute()
    array([0, 3, 6])
    >>> edges[1].compute()
    array([0, 3, 6])
    >>> edges[2].compute()
    array([0, 3, 6])
    """
    # logic used in numpy.histogramdd to handle normed/density.
    if normed is None:
        if density is None:
            density = False
    elif density is None:
        # an explicit normed argument was passed, alias it to the new name
        density = normed
    else:
        raise TypeError("Cannot specify both 'normed' and 'density'")

    # check if any dask collections (dc) were passed to bins= or
    # range= these are unsupported.
    dc_bins = is_dask_collection(bins)
    if isinstance(bins, (list, tuple)):
        dc_bins = dc_bins or any([is_dask_collection(b) for b in bins])
    dc_range = (
        any([is_dask_collection(r) for r in range]) if range is not None else False
    )
    if dc_bins or dc_range:
        raise NotImplementedError(
            "Passing dask collections to bins=... or range=... is not supported."
        )

    # generate token and name for task
    token = tokenize(sample, bins, range, weights, density)
    name = f"histogramdd-sum-{token}"

    # N == total number of samples
    # D == total number of dimensions
    if hasattr(sample, "shape"):
        if len(sample.shape) != 2:
            raise ValueError("Single array input to histogramdd should be columnar")
        else:
            _, D = sample.shape
        n_chunks = sample.numblocks[0]
        rectangular_sample = True
        # Require data to be chunked along the first axis only.
        if sample.shape[1:] != sample.chunksize[1:]:
            raise ValueError("Input array can only be chunked along the 0th axis.")
    elif isinstance(sample, (tuple, list)):
        rectangular_sample = False
        D = len(sample)
        n_chunks = sample[0].numblocks[0]
        for i in _range(1, D):
            if sample[i].chunks != sample[0].chunks:
                raise ValueError("All coordinate arrays must be chunked identically.")
    else:
        raise ValueError(
            "Incompatible sample. Must be a 2D array or a sequence of 1D arrays."
        )

    # Require only Array or Delayed objects for bins, range, and weights.
    # BUGFIX: each argument's own value (`val`) is checked here. The
    # previous code tested `bins` on every iteration, so unsupported
    # dask types passed as `range` or `weights` were never rejected.
    for argname, val in [("bins", bins), ("range", range), ("weights", weights)]:
        if not isinstance(val, (Array, Delayed)) and is_dask_collection(val):
            raise TypeError(
                "Dask types besides Array and Delayed are not supported "
                "for `histogramdd`. For argument `{}`, got: {!r}".format(argname, val)
            )

    # Require that the chunking of the sample and weights are compatible.
    if weights is not None:
        if rectangular_sample and weights.chunks[0] != sample.chunks[0]:
            raise ValueError(
                "Input array and weights must have the same shape "
                "and chunk structure along the first dimension."
            )
        elif not rectangular_sample and weights.numblocks[0] != n_chunks:
            raise ValueError(
                "Input arrays and weights must have the same shape "
                "and chunk structure."
            )

    # if bins is a list, tuple, then make sure the length is the same
    # as the number dimensions.
    if isinstance(bins, (list, tuple)):
        if len(bins) != D:
            raise ValueError(
                "The dimension of bins must be equal to the dimension of the sample."
            )

    # if range is defined, check that it's the right length and also a
    # sequence of pairs.
    if range is not None:
        if len(range) != D:
            raise ValueError(
                "range argument requires one entry, a min max pair, per dimension."
            )
        if not all(len(r) == 2 for r in range):
            raise ValueError("range argument should be a sequence of pairs")

    # If bins is a single int, create a tuple of len `D` containing `bins`.
    if isinstance(bins, int):
        bins = (bins,) * D

    # we will return the edges to mimic the NumPy API (we also use the
    # edges later as a way to calculate the total number of bins).
    if all(isinstance(b, int) for b in bins) and all(len(r) == 2 for r in range):
        edges = [np.linspace(r[0], r[1], b + 1) for b, r in zip(bins, range)]
    else:
        edges = [np.asarray(b) for b in bins]

    if rectangular_sample:
        deps = (sample,)
    else:
        deps = tuple(sample)

    if weights is not None:
        w_keys = flatten(weights.__dask_keys__())
        deps += (weights,)
        dtype = weights.dtype
    else:
        w_keys = (None,) * n_chunks
        dtype = np.histogramdd([])[0].dtype

    # This tuple of zeros represents the chunk index along the columns
    # (we only allow chunking along the rows).
    column_zeros = tuple(0 for _ in _range(D))

    # With dsk below, we will construct a (D + 1) dimensional array
    # stacked for each chunk. For example, if the histogram is going
    # to be 3 dimensions, this creates a stack of cubes (1 cube for
    # each sample chunk) that will be collapsed into a final cube (the
    # result). Depending on the input data, we can do this in two ways
    #
    # 1. The rectangular case: when the sample is a single 2D array
    #    where each column in the sample represents a coordinate of
    #    the sample).
    #
    # 2. The sequence-of-arrays case, when the sample is a tuple or
    #    list of arrays, with each array in that sequence representing
    #    the entirety of one coordinate of the complete sample.

    if rectangular_sample:
        sample_keys = flatten(sample.__dask_keys__())
        dsk = {
            (name, i, *column_zeros): (_block_histogramdd_rect, k, bins, range, w)
            for i, (k, w) in enumerate(zip(sample_keys, w_keys))
        }
    else:
        sample_keys = [
            list(flatten(sample[i].__dask_keys__())) for i in _range(len(sample))
        ]
        fused_on_chunk_keys = [
            tuple(sample_keys[j][i] for j in _range(D)) for i in _range(n_chunks)
        ]
        dsk = {
            (name, i, *column_zeros): (
                _block_histogramdd_multiarg,
                *(*k, bins, range, w),
            )
            for i, (k, w) in enumerate(zip(fused_on_chunk_keys, w_keys))
        }

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)
    all_nbins = tuple((b.size - 1,) for b in edges)
    stacked_chunks = ((1,) * n_chunks, *all_nbins)
    mapped = Array(graph, name, stacked_chunks, dtype=dtype)
    # Finally, sum over chunks providing to get the final D
    # dimensional result array.
    n = mapped.sum(axis=0)

    if density:
        # compute array of values to divide by the bin width along
        # each dimension.
        width_divider = np.ones(n.shape)
        for i in _range(D):
            shape = np.ones(D, int)
            shape[i] = width_divider.shape[i]
            width_divider *= np.diff(edges[i]).reshape(shape)
        width_divider = asarray(width_divider, chunks=n.chunks)
        return n / width_divider / n.sum(), edges

    return n, [asarray(entry) for entry in edges]
@derived_from(np)
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    # Blocked covariance estimate, mirroring numpy.cov's argument handling
    # so results match numpy. Statement order is kept in lockstep with the
    # NumPy original on purpose.
    # This was copied almost verbatim from np.cov
    # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
    # or NUMPY_LICENSE.txt within this directory
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be integer")
    # Handles complex arrays too
    m = asarray(m)
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = asarray(y)
        dtype = np.result_type(m, y, np.float64)
    X = array(m, ndmin=2, dtype=dtype)
    if X.shape[0] == 1:
        # A single row is always treated as one variable of many observations.
        rowvar = 1
    if rowvar:
        # Rows are variables, columns are observations.
        N = X.shape[1]
        axis = 0
    else:
        # Columns are variables, rows are observations.
        N = X.shape[0]
        axis = 1
    # check ddof; default matches numpy: 1 (sample covariance) unless bias set
    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0
    fact = float(N - ddof)
    if fact <= 0:
        # Degenerate normalization; warn like numpy and clamp to 0.
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0
    if y is not None:
        y = array(y, ndmin=2, dtype=dtype)
        X = concatenate((X, y), axis)
    # Center each variable, then form the normalized (conjugated) outer product.
    X = X - X.mean(axis=1 - axis, keepdims=True)
    if not rowvar:
        return (dot(X.T, X.conj()) / fact).squeeze()
    else:
        return (dot(X, X.T.conj()) / fact).squeeze()
@derived_from(np)
def corrcoef(x, y=None, rowvar=1):
    """Blocked Pearson correlation coefficient matrix.

    Normalizes the covariance matrix from :func:`cov` by the outer
    product of the per-variable standard deviations.
    """
    c = cov(x, y, rowvar)
    if c.shape == ():
        # 0-d covariance: correlation of a variable with itself.
        return c / c
    stddev = sqrt(diag(c))
    stddev = stddev.reshape((stddev.shape[0], 1))
    return (c / stddev) / stddev.T
@implements(np.round, np.round_)
@derived_from(np)
def round(a, decimals=0):
    """Round each element of ``a`` to ``decimals`` places, blockwise."""
    rounder = partial(np.round, decimals=decimals)
    return a.map_blocks(rounder, dtype=a.dtype)
@implements(np.ndim)
@derived_from(np)
def ndim(a):
    # Number of array dimensions; simply delegates to the `ndim` attribute.
    return a.ndim
@implements(np.iscomplexobj)
@derived_from(np)
def iscomplexobj(x):
    """Return True when ``x`` has a complex dtype (checks type, not values)."""
    scalar_type = x.dtype.type
    return issubclass(scalar_type, np.complexfloating)
def _unique_internal(ar, indices, counts, return_inverse=False):
"""
Helper/wrapper function for :func:`numpy.unique`.
Uses :func:`numpy.unique` to find the unique values for the array chunk.
Given this chunk may not represent the whole array, also take the
``indices`` and ``counts`` that are in 1-to-1 correspondence to ``ar``
and reduce them in the same fashion as ``ar`` is reduced. Namely sum
any counts that correspond to the same value and take the smallest
index that corresponds to the same value.
To handle the inverse mapping from the unique values to the original
array, simply return a NumPy array created with ``arange`` with enough
values to correspond 1-to-1 to the unique values. While there is more
work needed to be done to create the full inverse mapping for the
original array, this provides enough information to generate the
inverse mapping in Dask.
Given Dask likes to have one array returned from functions like
``blockwise``, some formatting is done to stuff all of the resulting arrays
into one big NumPy structured array. Dask is then able to handle this
object and can split it apart into the separate results on the Dask side,
which then can be passed back to this function in concatenated chunks for
further reduction or can be return to the user to perform other forms of
analysis.
By handling the problem in this way, it does not matter where a chunk
is in a larger array or how big it is. The chunk can still be computed
on the same way. Also it does not matter if the chunk is the result of
other chunks being run through this function multiple times. The end
result will still be just as accurate using this strategy.
"""
return_index = indices is not None
return_counts = counts is not None
u = np.unique(ar)
dt = [("values", u.dtype)]
if return_index:
dt.append(("indices", np.intp))
if return_inverse:
dt.append(("inverse", np.intp))
if return_counts:
dt.append(("counts", np.intp))
r = np.empty(u.shape, dtype=dt)
r["values"] = u
if return_inverse:
r["inverse"] = np.arange(len(r), dtype=np.intp)
if return_index or return_counts:
for i, v in enumerate(r["values"]):
m = ar == v
if return_index:
indices[m].min(keepdims=True, out=r["indices"][i : i + 1])
if return_counts:
counts[m].sum(keepdims=True, out=r["counts"][i : i + 1])
return r
def unique_no_structured_arr(
    ar, return_index=False, return_inverse=False, return_counts=False
):
    # A simplified version of `unique`, that allows computing unique for array
    # types that don't support structured arrays (such as cupy.ndarray), but
    # can only compute values at the moment.
    if (
        return_index is not False
        or return_inverse is not False
        or return_counts is not False
    ):
        raise ValueError(
            "dask.array.unique does not support `return_index`, `return_inverse` "
            "or `return_counts` with array types that don't support structured "
            "arrays."
        )
    # Work on the flattened array, like np.unique.
    ar = ar.ravel()
    # Per-chunk np.unique; each chunk yields an unknown number of values.
    args = [ar, "i"]
    meta = meta_from_array(ar)
    out = blockwise(np.unique, "i", *args, meta=meta)
    # Chunk sizes are unknown until computed, hence NaN placeholders.
    out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)
    out_parts = [out]
    # Aggregate: concatenate every chunk's uniques and deduplicate once more.
    name = "unique-aggregate-" + out.name
    dsk = {
        (name, 0): (
            (np.unique,)
            + tuple(
                (np.concatenate, o.__dask_keys__())
                if hasattr(o, "__dask_keys__")
                else o
                for o in out_parts
            )
        )
    }
    dependencies = [o for o in out_parts if hasattr(o, "__dask_keys__")]
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    # Final result is a single chunk of unknown length.
    chunks = ((np.nan,),)
    out = Array(graph, name, chunks, meta=meta)
    result = [out]
    if len(result) == 1:
        result = result[0]
    else:
        result = tuple(result)
    return result
@derived_from(np)
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
    # Test whether the downstream library supports structured arrays. If the
    # `np.empty_like` call raises a `TypeError`, the downstream library (e.g.,
    # CuPy) doesn't support it. In that case we return the
    # `unique_no_structured_arr` implementation, otherwise (e.g., NumPy) just
    # continue as normal.
    try:
        meta = meta_from_array(ar)
        np.empty_like(meta, dtype=[("a", int), ("b", float)])
    except TypeError:
        return unique_no_structured_arr(
            ar,
            return_index=return_index,
            return_inverse=return_inverse,
            return_counts=return_counts,
        )
    # Work on the flattened array, like np.unique.
    ar = ar.ravel()
    # Run unique on each chunk and collect results in a Dask Array of
    # unknown size.
    args = [ar, "i"]
    out_dtype = [("values", ar.dtype)]
    if return_index:
        # Positional indices 0..N-1, chunked like `ar`, reduced per chunk.
        args.extend([arange(ar.shape[0], dtype=np.intp, chunks=ar.chunks[0]), "i"])
        out_dtype.append(("indices", np.intp))
    else:
        args.extend([None, None])
    if return_counts:
        # A weight of one per element; summed per unique value in each chunk.
        args.extend([ones((ar.shape[0],), dtype=np.intp, chunks=ar.chunks[0]), "i"])
        out_dtype.append(("counts", np.intp))
    else:
        args.extend([None, None])
    out = blockwise(_unique_internal, "i", *args, dtype=out_dtype, return_inverse=False)
    # Each chunk yields an unknown number of uniques, hence NaN chunk sizes.
    out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)
    # Take the results from the unique chunks and do the following.
    #
    # 1. Collect all results as arguments.
    # 2. Concatenate each result into one big array.
    # 3. Pass all results as arguments to the internal unique again.
    #
    # TODO: This should be replaced with a tree reduction using this strategy.
    # xref: https://github.com/dask/dask/issues/2851
    out_parts = [out["values"]]
    if return_index:
        out_parts.append(out["indices"])
    else:
        out_parts.append(None)
    if return_counts:
        out_parts.append(out["counts"])
    else:
        out_parts.append(None)
    name = "unique-aggregate-" + out.name
    dsk = {
        (name, 0): (
            (_unique_internal,)
            + tuple(
                (np.concatenate, o.__dask_keys__())
                if hasattr(o, "__dask_keys__")
                else o
                for o in out_parts
            )
            + (return_inverse,)
        )
    }
    out_dtype = [("values", ar.dtype)]
    if return_index:
        out_dtype.append(("indices", np.intp))
    if return_inverse:
        out_dtype.append(("inverse", np.intp))
    if return_counts:
        out_dtype.append(("counts", np.intp))
    dependencies = [o for o in out_parts if hasattr(o, "__dask_keys__")]
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    # The aggregate is a single chunk of unknown length.
    chunks = ((np.nan,),)
    out = Array(graph, name, chunks, out_dtype)
    # Split out all results to return to the user.
    result = [out["values"]]
    if return_index:
        result.append(out["indices"])
    if return_inverse:
        # Using the returned unique values and arange of unknown length, find
        # each value matching a unique value and replace it with its
        # corresponding index or `0`. There should be only one entry for this
        # index in axis `1` (the one of unknown length). Reduce axis `1`
        # through summing to get an array with known dimensionality and the
        # mapping of the original values.
        mtches = (ar[:, None] == out["values"][None, :]).astype(np.intp)
        result.append((mtches * out["inverse"]).sum(axis=1))
    if return_counts:
        result.append(out["counts"])
    if len(result) == 1:
        result = result[0]
    else:
        result = tuple(result)
    return result
def _isin_kernel(element, test_elements, assume_unique=False):
values = np.in1d(element.ravel(), test_elements, assume_unique=assume_unique)
return values.reshape(element.shape + (1,) * test_elements.ndim)
@safe_wraps(getattr(np, "isin", None))
def isin(element, test_elements, assume_unique=False, invert=False):
    """Blocked membership test: True where elements of ``element`` appear
    in ``test_elements`` (optionally inverted)."""
    element = asarray(element)
    test_elements = asarray(test_elements)
    elem_axes = tuple(range(element.ndim))
    test_axes = tuple(element.ndim + i for i in range(test_elements.ndim))
    # Pairwise membership per (element-chunk, test-chunk); the test axes are
    # squeezed to length 1 by the kernel and reduced away below.
    hits = blockwise(
        _isin_kernel,
        elem_axes + test_axes,
        element,
        elem_axes,
        test_elements,
        test_axes,
        adjust_chunks={ax: lambda _: 1 for ax in test_axes},
        dtype=bool,
        assume_unique=assume_unique,
    )
    membership = hits.any(axis=test_axes)
    return ~membership if invert else membership
@derived_from(np)
def roll(array, shift, axis=None):
    """Roll array elements along the given axes, wrapping around the ends."""
    result = array
    if axis is None:
        # Flattened roll, like np.roll: operate on the raveled array and
        # restore the original shape at the end.
        result = ravel(result)
        if not isinstance(shift, Integral):
            raise TypeError(
                "Expect `shift` to be an instance of Integral when `axis` is None."
            )
        shift, axis = (shift,), (0,)
    else:
        try:
            len(shift)
        except TypeError:
            shift = (shift,)
        try:
            len(axis)
        except TypeError:
            axis = (axis,)
    if len(shift) != len(axis):
        raise ValueError("Must have the same number of shifts as axes.")
    for ax, amount in zip(axis, shift):
        # A roll by `amount` is a split at `-amount mod size` plus a swap
        # of the two pieces.
        offset = (-amount) % result.shape[ax]
        front = result.ndim * [slice(None)]
        back = result.ndim * [slice(None)]
        front[ax] = slice(offset, None)
        back[ax] = slice(None, offset)
        result = concatenate([result[tuple(front)], result[tuple(back)]], axis=ax)
    result = result.reshape(array.shape)
    # Ensure that the output is always a new array object
    result = result.copy() if result is array else result
    return result
@derived_from(np)
def shape(array):
    # Mirror np.shape: just expose the object's `shape` attribute.
    return array.shape
@derived_from(np)
def union1d(ar1, ar2):
    # Union of two arrays: flatten both, concatenate, then dedupe via `unique`.
    return unique(concatenate((ar1.ravel(), ar2.ravel())))
@derived_from(np)
def ravel(array_like):
    # Flatten to 1-D; asanyarray accepts any array-like input.
    return asanyarray(array_like).reshape((-1,))
@derived_from(np)
def expand_dims(a, axis):
    """Insert new length-one axes into ``a`` at the positions given by ``axis``."""
    if type(axis) not in (tuple, list):
        axis = (axis,)
    ndim_out = len(axis) + a.ndim
    axis = validate_axis(axis, ndim_out)
    # Interleave singleton axes with the existing dimensions.
    remaining = iter(a.shape)
    new_shape = []
    for position in range(ndim_out):
        new_shape.append(1 if position in axis else next(remaining))
    return a.reshape(new_shape)
@derived_from(np)
def squeeze(a, axis=None):
    """Remove length-one dimensions from ``a`` (all of them, or just ``axis``)."""
    if axis is None:
        # Default: squeeze every singleton dimension.
        axis = tuple(i for i, dim in enumerate(a.shape) if dim == 1)
    elif not isinstance(axis, tuple):
        axis = (axis,)
    if any(a.shape[ax] != 1 for ax in axis):
        raise ValueError("cannot squeeze axis with size other than one")
    axis = validate_axis(axis, a.ndim)
    # Index squeezed axes with 0, keep the rest with full slices.
    selector = tuple(0 if i in axis else slice(None) for i in range(a.ndim))
    return a[selector]
@derived_from(np)
def compress(condition, a, axis=None):
    """Select slices of ``a`` along ``axis`` where ``condition`` is True."""
    if not is_arraylike(condition):
        # Coerce plain sequences to a concrete numpy array; array-like
        # objects pass through unchanged.
        condition = np.asarray(condition)
    condition = condition.astype(bool)
    a = asarray(a)

    if condition.ndim != 1:
        raise ValueError("Condition must be one dimensional")

    if axis is None:
        # np.compress semantics: no axis means operate on the flattened array.
        a = a.ravel()
        axis = 0
    axis = validate_axis(axis, a.ndim)

    # Clip `a` along the axis so a short condition acts as if padded
    # with False, then select with the boolean condition.
    limit = len(condition)
    clipper = tuple(
        slice(None, limit) if dim == axis else slice(None) for dim in range(a.ndim)
    )
    picker = tuple(
        condition if dim == axis else slice(None) for dim in range(a.ndim)
    )
    return a[clipper][picker]
@derived_from(np)
def extract(condition, arr):
    # np.extract equivalent: flatten both inputs and keep elements of `arr`
    # where `condition` is truthy.
    condition = asarray(condition).astype(bool)
    arr = asarray(arr)
    return compress(condition.ravel(), arr.ravel())
@derived_from(np)
def take(a, indices, axis=0):
    """Take elements from ``a`` along ``axis`` at the given ``indices``."""
    axis = validate_axis(axis, a.ndim)
    if isinstance(a, np.ndarray) and isinstance(indices, Array):
        # numpy data indexed by a dask index array: map over the index chunks.
        return _take_dask_array_from_numpy(a, indices, axis)
    selector = (slice(None),) * axis + (indices,)
    return a[selector]
def _take_dask_array_from_numpy(a, indices, axis):
    """Index numpy array ``a`` with a dask array of ``indices`` along ``axis``,
    producing a dask array chunked like ``indices``."""
    assert isinstance(a, np.ndarray)
    assert isinstance(indices, Array)

    def _take_block(block):
        # `a` and `axis` are captured; each block of indices selects from `a`.
        return np.take(a, block, axis)

    return indices.map_blocks(_take_block, chunks=indices.chunks, dtype=a.dtype)
@derived_from(np)
def around(x, decimals=0):
    """Round ``x`` blockwise to the given number of decimals."""
    rounder = partial(np.around, decimals=decimals)
    return map_blocks(rounder, x, dtype=x.dtype)
def _asarray_isnull(values):
import pandas as pd
return np.asarray(pd.isnull(values))
def isnull(values):
    """pandas.isnull for dask arrays"""
    # eagerly raise ImportError, if pandas isn't available
    import pandas as pd  # noqa
    # Elementwise pd.isnull applied blockwise via the kernel above.
    return elemwise(_asarray_isnull, values, dtype="bool")
def notnull(values):
    """pandas.notnull for dask arrays"""
    # Elementwise negation of `isnull`.
    return ~isnull(values)
@derived_from(np)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
    """Elementwise approximate-equality test (blockwise ``np.isclose``)."""
    compare = partial(np.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
    return elemwise(compare, arr1, arr2, dtype="bool")
@derived_from(np)
def allclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
    # True iff every element pair passes the `isclose` tolerance test.
    return isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=equal_nan).all()
def variadic_choose(a, *choices):
    # Varargs adapter for np.choose, which expects the choice arrays as a
    # single sequence argument.
    return np.choose(a, list(choices))
@derived_from(np)
def choose(a, choices):
    # Elementwise np.choose; the varargs adapter lets elemwise broadcast
    # the choice arrays alongside `a`.
    return elemwise(variadic_choose, a, *choices)
def _isnonzero_vec(v):
    # Scalar predicate: True when `v` has any nonzero entry.
    return bool(np.count_nonzero(v))
# Vectorize elementwise for dtypes where `.astype(bool)` misbehaves
# (used by `isnonzero` below).
_isnonzero_vec = np.vectorize(_isnonzero_vec, otypes=[bool])
def isnonzero(a):
    """Elementwise truthiness test matching ``np.nonzero`` semantics."""
    if a.dtype.kind in {"U", "S"}:
        # NumPy treats all-whitespace strings as falsy (like in `np.nonzero`).
        # but not in `.astype(bool)`. To match the behavior of numpy at least until
        # 1.19, we use `_isnonzero_vec`. When NumPy changes behavior, we should just
        # use the try block below.
        # https://github.com/numpy/numpy/issues/9875
        return a.map_blocks(_isnonzero_vec, dtype=bool)
    # Probe whether this dtype supports a plain bool cast; fall back to the
    # vectorized predicate when it does not.
    try:
        np.zeros(tuple(), dtype=a.dtype).astype(bool)
    except ValueError:
        ######################################################
        # Handle special cases where conversion to bool does #
        # not work correctly.                                #
        #                                                    #
        # xref: https://github.com/numpy/numpy/issues/9479   #
        ######################################################
        return a.map_blocks(_isnonzero_vec, dtype=bool)
    else:
        return a.astype(bool)
@derived_from(np)
def argwhere(a):
    """Indices of the nonzero elements of ``a``, one row per element."""
    a = asarray(a)
    mask = isnonzero(a).flatten()
    # Full index grid for `a`; collapse to one (npoints, ndim) table.
    coords = indices(a.shape, dtype=np.intp, chunks=a.chunks)
    if coords.ndim > 1:
        coords = stack([coords[i].ravel() for i in range(len(coords))], axis=1)
    return compress(mask, coords, axis=0)
@derived_from(np)
def where(condition, x=None, y=None):
    """Blocked np.where: elementwise choice of ``x`` or ``y`` by ``condition``,
    or (with a single argument) the indices of nonzero elements."""
    if (x is None) != (y is None):
        raise ValueError("either both or neither of x and y should be given")
    if (x is None) and (y is None):
        return nonzero(condition)
    if np.isscalar(condition):
        # Scalar condition: pick one branch eagerly but still broadcast and
        # cast exactly as np.where would.
        dtype = result_type(x, y)
        x = asarray(x)
        y = asarray(y)
        shape = broadcast_shapes(x.shape, y.shape)
        chosen = x if condition else y
        return broadcast_to(chosen, shape).astype(dtype)
    return elemwise(np.where, condition, x, y)
@derived_from(np)
def count_nonzero(a, axis=None):
    # Truthiness per element (string-aware via isnonzero), then an integer sum.
    return isnonzero(asarray(a)).astype(np.intp).sum(axis=axis)
@derived_from(np)
def flatnonzero(a):
    # Indices of nonzero elements of the flattened array: the single index
    # column that argwhere produces for a 1-D input.
    return argwhere(asarray(a).ravel())[:, 0]
@derived_from(np)
def nonzero(a):
    """Indices of the nonzero elements, one 1-D array per dimension of ``a``."""
    coords = argwhere(a)
    if coords.ndim <= 1:
        return (coords,)
    # Split the (npoints, ndim) table into per-dimension index arrays.
    return tuple(coords[:, i] for i in range(coords.shape[1]))
def _unravel_index_kernel(indices, func_kwargs):
return np.stack(np.unravel_index(indices, **func_kwargs))
@derived_from(np)
def unravel_index(indices, shape, order="C"):
    # Convert flat indices into a tuple of coordinate arrays, one per
    # dimension of `shape`, via a blockwise np.unravel_index.
    if shape and indices.size:
        # The kernel stacks coordinates along a new leading axis of
        # length len(shape); iterating the result yields one dask array
        # per dimension.
        unraveled_indices = tuple(
            indices.map_blocks(
                _unravel_index_kernel,
                dtype=np.intp,
                chunks=(((len(shape),),) + indices.chunks),
                new_axis=0,
                func_kwargs={"shape": shape, "order": order},
            )
        )
    else:
        # Empty shape or no indices: one empty coordinate array per dimension.
        unraveled_indices = tuple(empty((0,), dtype=np.intp, chunks=1) for i in shape)
    return unraveled_indices
@wraps(np.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode="raise", order="C"):
    """Blocked np.ravel_multi_index: convert a sequence (or stacked array)
    of coordinate arrays into flat indices for an array of shape ``dims``."""
    if np.isscalar(dims):
        dims = (dims,)
    if is_dask_collection(dims) or any(is_dask_collection(d) for d in dims):
        raise NotImplementedError(
            f"Dask types are not supported in the `dims` argument: {dims!r}"
        )

    if is_arraylike(multi_index):
        stacked = asarray(multi_index)
    else:
        # A sequence of coordinate arrays: broadcast, then stack along a
        # new leading axis (one row per dimension).
        stacked = stack(broadcast_arrays(*multi_index))

    # Shapes containing NaN are unknown; skip the length check in that case.
    shape_is_known = not np.isnan(stacked.shape).any()
    if shape_is_known and len(stacked) != len(dims):
        raise ValueError(
            f"parameter multi_index must be a sequence of length {len(dims)}"
        )
    if not np.issubdtype(stacked.dtype, np.signedinteger):
        raise TypeError("only int indices permitted")

    return stacked.map_blocks(
        np.ravel_multi_index,
        dtype=np.intp,
        chunks=stacked.chunks[1:],
        drop_axis=0,
        dims=dims,
        mode=mode,
        order=order,
    )
def _int_piecewise(x, *condlist, **kwargs):
return np.piecewise(
x, list(condlist), kwargs["funclist"], *kwargs["func_args"], **kwargs["func_kw"]
)
@derived_from(np)
def piecewise(x, condlist, funclist, *args, **kw):
    # Blockwise np.piecewise: the condition arrays are mapped alongside `x`,
    # while the function list and extra args travel as keyword payload to
    # the `_int_piecewise` kernel.
    return map_blocks(
        _int_piecewise,
        x,
        *condlist,
        dtype=x.dtype,
        name="piecewise",
        funclist=funclist,
        func_args=args,
        func_kw=kw,
    )
def _select(*args, **kwargs):
"""
This is a version of :func:`numpy.select` that acceptes an arbitrary number of arguments and
splits them in half to create ``condlist`` and ``choicelist`` params.
"""
split_at = len(args) // 2
condlist = args[:split_at]
choicelist = args[split_at:]
return np.select(condlist, choicelist, **kwargs)
@derived_from(np)
def select(condlist, choicelist, default=0):
# Making the same checks that np.select
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError("list of cases must be same length as list of conditions")
if len(condlist) == 0:
raise ValueError("select with an empty condition list is not possible")
choicelist = [asarray(choice) for choice in choicelist]
try:
intermediate_dtype = result_type(*choicelist)
except TypeError as e:
msg = "Choicelist elements do not have a common dtype."
raise TypeError(msg) from e
blockwise_shape = tuple(range(choicelist[0].ndim))
condargs = [arg for elem in condlist for arg in (elem, blockwise_shape)]
choiceargs = [arg for elem in choicelist for arg in (elem, blockwise_shape)]
return blockwise(
_select,
blockwise_shape,
*condargs,
*choiceargs,
dtype=intermediate_dtype,
name="select",
default=default,
)
def _partition(total: int, divisor: int) -> tuple[tuple[int, ...], tuple[int, ...]]:
"""Given a total and a divisor, return two tuples: A tuple containing `divisor`
repeated the number of times it divides `total`, and length-1 or empty tuple
containing the remainder when `total` is divided by `divisor`. If `divisor` factors
`total`, i.e. if the remainder is 0, then `remainder` is empty.
"""
multiples = (divisor,) * (total // divisor)
remainder = (total % divisor,) if total % divisor else ()
return multiples, remainder
def aligned_coarsen_chunks(chunks: list[int], multiple: int) -> tuple[int, ...]:
    """
    Returns a new chunking aligned with the coarsening multiple.
    Any excess is at the end of the array.

    Examples
    --------
    >>> aligned_coarsen_chunks(chunks=(1, 2, 3), multiple=4)
    (4, 2)
    >>> aligned_coarsen_chunks(chunks=(1, 20, 3, 4), multiple=4)
    (4, 20, 4)
    >>> aligned_coarsen_chunks(chunks=(20, 10, 15, 23, 24), multiple=10)
    (20, 10, 20, 20, 20, 2)
    """
    chunks_arr = np.asarray(chunks)
    leftover = chunks_arr % multiple
    total_excess = leftover.sum()
    # Round every chunk down to a whole number of multiples.
    aligned = chunks_arr - leftover
    # Chunks that were already an exact multiple of ``multiple``.
    already_aligned = aligned == chunks_arr
    good = np.where(already_aligned)[0]
    bad = np.where(~already_aligned)[0]
    # Grow the misaligned chunks first (smallest first), then the aligned
    # ones (smallest first), distributing the rounded-off excess in whole
    # units of ``multiple``.
    grow_order = [
        *bad[np.argsort(aligned[bad])],
        *good[np.argsort(aligned[good])],
    ]
    whole_units, tail = _partition(total_excess, multiple)
    for position, bump in zip(grow_order, whole_units):
        aligned[position] += bump
    # Whatever excess does not form a whole multiple becomes a trailing,
    # undersized chunk; then drop any chunks rounded down to zero.
    aligned = np.array([*aligned, *tail])
    aligned = aligned[aligned > 0]
    return tuple(aligned)
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes, trim_excess=False, **kwargs):
    # Downsample ``x`` by applying ``reduction`` over non-overlapping windows;
    # ``axes`` maps axis index -> coarsening factor.  Without ``trim_excess``
    # every factor must evenly divide the corresponding array dimension.
    if not trim_excess and not all(x.shape[i] % div == 0 for i, div in axes.items()):
        msg = f"Coarsening factors {axes} do not align with array shape {x.shape}."
        raise ValueError(msg)
    # Swap a dask-level reduction (e.g. da.sum) for its NumPy namesake so the
    # per-chunk kernel runs plain NumPy code.
    if reduction.__module__.startswith("dask."):
        reduction = getattr(np, reduction.__name__)
    # Rechunk so each chunk size along a coarsened axis is a whole number of
    # factors (aligned_coarsen_chunks pushes any excess to a trailing chunk).
    new_chunks = {}
    for i, div in axes.items():
        aligned = aligned_coarsen_chunks(x.chunks[i], div)
        if aligned != x.chunks[i]:
            new_chunks[i] = aligned
    if new_chunks:
        x = x.rechunk(new_chunks)
    # Build one task per chunk, applying chunk.coarsen blockwise.
    name = "coarsen-" + tokenize(reduction, x, axes, trim_excess)
    dsk = {
        (name,)
        + key[1:]: (apply, chunk.coarsen, [reduction, key, axes, trim_excess], kwargs)
        for key in flatten(x.__dask_keys__())
    }
    # Output chunk sizes shrink by the coarsening factor on each affected axis.
    chunks = tuple(
        tuple(int(bd // axes.get(i, 1)) for bd in bds) for i, bds in enumerate(x.chunks)
    )
    # Infer output meta/dtype by reducing a minimal sample array.
    meta = reduction(np.empty((1,) * x.ndim, dtype=x.dtype), **kwargs)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
    return Array(graph, name, chunks, meta=meta)
def split_at_breaks(array, breaks, axis=0):
    """Split an array into a list of arrays (using slices) at the given breaks

    >>> split_at_breaks(np.arange(6), [3, 5])
    [array([0, 1, 2]), array([3, 4]), array([5])]
    """
    # Bracket the break points with None so the first slice starts at the
    # beginning and the last runs to the end.
    bounds = [None] + list(breaks) + [None]
    leading = (slice(None),) * axis
    return [
        array[leading + (slice(lo, hi),)]
        for lo, hi in zip(bounds[:-1], bounds[1:])
    ]
@derived_from(np)
def insert(arr, obj, values, axis):
    # axis is a required argument here to avoid needing to deal with the numpy
    # default case (which reshapes the array to make it flat)
    axis = validate_axis(axis, arr.ndim)
    if isinstance(obj, slice):
        obj = np.arange(*obj.indices(arr.shape[axis]))
    obj = np.asarray(obj)
    scalar_obj = obj.ndim == 0
    if scalar_obj:
        obj = np.atleast_1d(obj)
    # Normalize negative insertion positions to absolute indices.
    obj = np.where(obj < 0, obj + arr.shape[axis], obj)
    if (np.diff(obj) < 0).any():
        raise NotImplementedError(
            "da.insert only implemented for monotonic ``obj`` argument"
        )
    # Cut ``arr`` at each distinct insertion point; slabs of ``values`` will
    # be interleaved between the resulting pieces below.
    split_arr = split_at_breaks(arr, np.unique(obj), axis)
    if getattr(values, "ndim", 0) == 0:
        # we need to turn values into a dask array
        name = "values-" + tokenize(values)
        dtype = getattr(values, "dtype", type(values))
        values = Array({(name,): values}, name, chunks=(), dtype=dtype)
        # Broadcast the scalar to one slab per insertion index.
        values_shape = tuple(
            len(obj) if axis == n else s for n, s in enumerate(arr.shape)
        )
        values = broadcast_to(values, values_shape)
    elif scalar_obj:
        # Single insertion point: give ``values`` a length-1 slot on ``axis``.
        values = values[(slice(None),) * axis + (None,)]
    # Match ``values``' chunking to ``arr`` on every axis except ``axis``.
    values_chunks = tuple(
        values_bd if axis == n else arr_bd
        for n, (arr_bd, values_bd) in enumerate(zip(arr.chunks, values.chunks))
    )
    values = values.rechunk(values_chunks)
    # Repeated indices in ``obj`` mean several values inserted at one cut;
    # split ``values`` so each gap receives the right number of slabs.
    counts = np.bincount(obj)[:-1]
    values_breaks = np.cumsum(counts[counts > 0])
    split_values = split_at_breaks(values, values_breaks, axis)
    # Alternate array pieces with value slabs, dropping empty pieces, and
    # join everything back together along ``axis``.
    interleaved = list(interleave([split_arr, split_values]))
    interleaved = [i for i in interleaved if i.nbytes]
    return concatenate(interleaved, axis=axis)
@derived_from(np)
def delete(arr, obj, axis):
    """
    NOTE: If ``obj`` is a dask array it is implicitly computed when this function
    is called.
    """
    # axis is a required argument here to avoid needing to deal with the numpy
    # default case (which reshapes the array to make it flat)
    axis = validate_axis(axis, arr.ndim)
    if isinstance(obj, slice):
        indices = np.arange(*obj.indices(arr.shape[axis]))
        if obj.step and obj.step < 0:
            indices = indices[::-1]
        obj = indices
    else:
        obj = np.asarray(obj)
        obj = np.where(obj < 0, obj + arr.shape[axis], obj)
        obj = np.unique(obj)
    # Cut the array just before each deletion index, then drop the first
    # element (along ``axis``) of every piece except the leading one; those
    # leading elements are exactly the positions being removed.
    drop_first = tuple(
        slice(1, None) if n == axis else slice(None) for n in range(arr.ndim)
    )
    pieces = split_at_breaks(arr, obj, axis)
    trimmed = [
        piece if i == 0 else piece[drop_first] for i, piece in enumerate(pieces)
    ]
    return concatenate(trimmed, axis=axis)
@derived_from(np)
def append(arr, values, axis=None):
    # Mirrors numpy.append: with no axis given, both operands are flattened
    # and joined end-to-end; otherwise they are concatenated along ``axis``.
    arr = asanyarray(arr)
    if axis is None:
        arr = arr if arr.ndim == 1 else arr.ravel()
        values = ravel(asanyarray(values))
        axis = arr.ndim - 1
    return concatenate((arr, values), axis=axis)
def _average(a, axis=None, weights=None, returned=False, is_masked=False):
    """Shared implementation behind ``da.average`` and ``da.ma.average``.

    Computes the (optionally weighted) mean of ``a`` along ``axis``.  When
    ``returned`` is true, the sum of weights is returned alongside the
    average; ``is_masked`` enables masked-array weight handling.
    """
    # This was minimally modified from numpy.average
    # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
    # or NUMPY_LICENSE.txt within this directory
    # Wrapper used by da.average or da.ma.average.
    a = asanyarray(a)
    if weights is None:
        avg = a.mean(axis)
        # Unweighted case: the "sum of weights" is just the element count
        # per output element.
        scl = avg.dtype.type(a.size / avg.size)
    else:
        wgt = asanyarray(weights)
        # Integer/boolean data promotes to at least float64, as in NumPy.
        if issubclass(a.dtype.type, (np.integer, np.bool_)):
            result_dtype = result_type(a.dtype, wgt.dtype, "f8")
        else:
            result_dtype = result_type(a.dtype, wgt.dtype)
        # Sanity checks
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights differ."
                )
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ."
                )
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis."
                )
            # setup wgt to broadcast along axis
            wgt = broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)
            wgt = wgt.swapaxes(-1, axis)
        if is_masked:
            from dask.array.ma import getmaskarray
            # Zero out weights wherever the data is masked so masked
            # elements contribute neither to the numerator nor denominator.
            wgt = wgt * (~getmaskarray(a))
        scl = wgt.sum(axis=axis, dtype=result_dtype)
        avg = multiply(a, wgt, dtype=result_dtype).sum(axis) / scl
    if returned:
        if scl.shape != avg.shape:
            scl = broadcast_to(scl, avg.shape).copy()
        return avg, scl
    else:
        return avg
@derived_from(np)
def average(a, axis=None, weights=None, returned=False):
return _average(a, axis, weights, returned, is_masked=False)
@derived_from(np)
def tril(m, k=0):
m = asarray_safe(m, like=m)
mask = tri(
*m.shape[-2:],
k=k,
dtype=bool,
chunks=m.chunks[-2:],
like=meta_from_array(m) if _numpy_120 else None,
)
return where(mask, m, np.zeros_like(m, shape=(1,)))
@derived_from(np)
def triu(m, k=0):
m = asarray_safe(m, like=m)
mask = tri(
*m.shape[-2:],
k=k - 1,
dtype=bool,
chunks=m.chunks[-2:],
like=meta_from_array(m) if _numpy_120 else None,
)
return where(mask, np.zeros_like(m, shape=(1,)), m)
@derived_from(np)
def tril_indices(n, k=0, m=None, chunks="auto"):
return nonzero(tri(n, m, k=k, dtype=bool, chunks=chunks))
@derived_from(np)
def tril_indices_from(arr, k=0):
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1], chunks=arr.chunks)
@derived_from(np)
def triu_indices(n, k=0, m=None, chunks="auto"):
return nonzero(~tri(n, m, k=k - 1, dtype=bool, chunks=chunks))
@derived_from(np)
def triu_indices_from(arr, k=0):
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1], chunks=arr.chunks)
| 32.167915 | 112 | 0.609962 | from __future__ import annotations
import math
import warnings
from collections.abc import Iterable
from functools import partial, reduce, wraps
from numbers import Integral, Real
import numpy as np
from tlz import concat, interleave, sliding_window
from dask.array import chunk
from dask.array.core import (
Array,
asanyarray,
asarray,
blockwise,
broadcast_arrays,
broadcast_shapes,
broadcast_to,
concatenate,
elemwise,
from_array,
implements,
is_scalar_for_elemwise,
map_blocks,
stack,
tensordot_lookup,
)
from dask.array.creation import arange, diag, empty, indices, tri
from dask.array.einsumfuncs import einsum
from dask.array.numpy_compat import _numpy_120
from dask.array.reductions import reduction
from dask.array.ufunc import multiply, sqrt
from dask.array.utils import (
array_safe,
asarray_safe,
meta_from_array,
safe_wraps,
validate_axis,
)
from dask.array.wrap import ones
from dask.base import is_dask_collection, tokenize
from dask.core import flatten
from dask.delayed import Delayed, unpack_collections
from dask.highlevelgraph import HighLevelGraph
from dask.utils import apply, derived_from, funcname, is_arraylike, is_cupy_type
_range = range
@derived_from(np)
def array(x, dtype=None, ndmin=None, *, like=None):
if not _numpy_120 and like is not None:
raise RuntimeError("The use of ``like`` required NumPy >= 1.20")
x = asarray(x, like=like)
while ndmin is not None and x.ndim < ndmin:
x = x[None, :]
if dtype is not None and x.dtype != dtype:
x = x.astype(dtype)
return x
@derived_from(np)
def result_type(*args):
args = [a if is_scalar_for_elemwise(a) else a.dtype for a in args]
return np.result_type(*args)
@derived_from(np)
def atleast_3d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None, None, None]
elif x.ndim == 1:
x = x[None, :, None]
elif x.ndim == 2:
x = x[:, :, None]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@derived_from(np)
def atleast_2d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None, None]
elif x.ndim == 1:
x = x[None, :]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@derived_from(np)
def atleast_1d(*arys):
new_arys = []
for x in arys:
x = asanyarray(x)
if x.ndim == 0:
x = x[None]
new_arys.append(x)
if len(new_arys) == 1:
return new_arys[0]
else:
return new_arys
@derived_from(np)
def vstack(tup, allow_unknown_chunksizes=False):
if isinstance(tup, Array):
raise NotImplementedError(
"``vstack`` expects a sequence of arrays as the first argument"
)
tup = tuple(atleast_2d(x) for x in tup)
return concatenate(tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes)
@derived_from(np)
def hstack(tup, allow_unknown_chunksizes=False):
if isinstance(tup, Array):
raise NotImplementedError(
"``hstack`` expects a sequence of arrays as the first argument"
)
if all(x.ndim == 1 for x in tup):
return concatenate(
tup, axis=0, allow_unknown_chunksizes=allow_unknown_chunksizes
)
else:
return concatenate(
tup, axis=1, allow_unknown_chunksizes=allow_unknown_chunksizes
)
@derived_from(np)
def dstack(tup, allow_unknown_chunksizes=False):
if isinstance(tup, Array):
raise NotImplementedError(
"``dstack`` expects a sequence of arrays as the first argument"
)
tup = tuple(atleast_3d(x) for x in tup)
return concatenate(tup, axis=2, allow_unknown_chunksizes=allow_unknown_chunksizes)
@derived_from(np)
def swapaxes(a, axis1, axis2):
if axis1 == axis2:
return a
if axis1 < 0:
axis1 = axis1 + a.ndim
if axis2 < 0:
axis2 = axis2 + a.ndim
ind = list(range(a.ndim))
out = list(ind)
out[axis1], out[axis2] = axis2, axis1
return blockwise(np.swapaxes, out, a, ind, axis1=axis1, axis2=axis2, dtype=a.dtype)
@derived_from(np)
def transpose(a, axes=None):
if axes:
if len(axes) != a.ndim:
raise ValueError("axes don't match array")
axes = tuple(d + a.ndim if d < 0 else d for d in axes)
else:
axes = tuple(range(a.ndim))[::-1]
return blockwise(
np.transpose, axes, a, tuple(range(a.ndim)), dtype=a.dtype, axes=axes
)
def flip(m, axis=None):
m = asanyarray(m)
sl = m.ndim * [slice(None)]
if axis is None:
axis = range(m.ndim)
if not isinstance(axis, Iterable):
axis = (axis,)
try:
for ax in axis:
sl[ax] = slice(None, None, -1)
except IndexError as e:
raise ValueError(
f"`axis` of {str(axis)} invalid for {str(m.ndim)}-D array"
) from e
sl = tuple(sl)
return m[sl]
@derived_from(np)
def flipud(m):
return flip(m, 0)
@derived_from(np)
def fliplr(m):
return flip(m, 1)
@derived_from(np)
def rot90(m, k=1, axes=(0, 1)):
axes = tuple(axes)
if len(axes) != 2:
raise ValueError("len(axes) must be 2.")
m = asanyarray(m)
if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:
raise ValueError("Axes must be different.")
if axes[0] >= m.ndim or axes[0] < -m.ndim or axes[1] >= m.ndim or axes[1] < -m.ndim:
raise ValueError(f"Axes={axes} out of range for array of ndim={m.ndim}.")
k %= 4
if k == 0:
return m[:]
if k == 2:
return flip(flip(m, axes[0]), axes[1])
axes_list = list(range(0, m.ndim))
(axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], axes_list[axes[0]])
if k == 1:
return transpose(flip(m, axes[1]), axes_list)
else:
# k == 3
return flip(transpose(m, axes_list), axes[1])
def _tensordot(a, b, axes, is_sparse):
x = max([a, b], key=lambda x: x.__array_priority__)
tensordot = tensordot_lookup.dispatch(type(x))
x = tensordot(a, b, axes=axes)
if is_sparse and len(axes[0]) == 1:
return x
else:
ind = [slice(None, None)] * x.ndim
for a in sorted(axes[0]):
ind.insert(a, None)
x = x[tuple(ind)]
return x
def _tensordot_is_sparse(x):
is_sparse = "sparse" in str(type(x._meta))
if is_sparse:
# exclude pydata sparse arrays, no workaround required for these in tensordot
is_sparse = "sparse._coo.core.COO" not in str(type(x._meta))
return is_sparse
@derived_from(np)
def tensordot(lhs, rhs, axes=2):
if not isinstance(lhs, Array):
lhs = from_array(lhs)
if not isinstance(rhs, Array):
rhs = from_array(rhs)
if isinstance(axes, Iterable):
left_axes, right_axes = axes
else:
left_axes = tuple(range(lhs.ndim - axes, lhs.ndim))
right_axes = tuple(range(0, axes))
if isinstance(left_axes, Integral):
left_axes = (left_axes,)
if isinstance(right_axes, Integral):
right_axes = (right_axes,)
if isinstance(left_axes, list):
left_axes = tuple(left_axes)
if isinstance(right_axes, list):
right_axes = tuple(right_axes)
is_sparse = _tensordot_is_sparse(lhs) or _tensordot_is_sparse(rhs)
if is_sparse and len(left_axes) == 1:
concatenate = True
else:
concatenate = False
dt = np.promote_types(lhs.dtype, rhs.dtype)
left_index = list(range(lhs.ndim))
right_index = list(range(lhs.ndim, lhs.ndim + rhs.ndim))
out_index = left_index + right_index
adjust_chunks = {}
for l, r in zip(left_axes, right_axes):
out_index.remove(right_index[r])
right_index[r] = left_index[l]
if concatenate:
out_index.remove(left_index[l])
else:
adjust_chunks[left_index[l]] = lambda c: 1
intermediate = blockwise(
_tensordot,
out_index,
lhs,
left_index,
rhs,
right_index,
dtype=dt,
concatenate=concatenate,
adjust_chunks=adjust_chunks,
axes=(left_axes, right_axes),
is_sparse=is_sparse,
)
if concatenate:
return intermediate
else:
return intermediate.sum(axis=left_axes)
@derived_from(np)
def dot(a, b):
return tensordot(a, b, axes=((a.ndim - 1,), (b.ndim - 2,)))
@derived_from(np)
def vdot(a, b):
return dot(a.conj().ravel(), b.ravel())
def _chunk_sum(a, axis=None, dtype=None, keepdims=None):
# Caution: this is not your conventional array-sum: due
# to the special nature of the preceding blockwise con-
# traction, each chunk is expected to have exactly the
# same shape, with a size of 1 for the dimension given
# by `axis` (the reduction axis). This makes mere ele-
# ment-wise addition of the arrays possible. Besides,
# the output can be merely squeezed to lose the `axis`-
# dimension when keepdims = False
if type(a) is list:
out = reduce(partial(np.add, dtype=dtype), a)
else:
out = a
if keepdims:
return out
else:
return out.squeeze(axis[0])
def _sum_wo_cat(a, axis=None, dtype=None):
if dtype is None:
dtype = getattr(np.zeros(1, dtype=a.dtype).sum(), "dtype", object)
if a.shape[axis] == 1:
return a.squeeze(axis)
return reduction(
a, _chunk_sum, _chunk_sum, axis=axis, dtype=dtype, concatenate=False
)
def _matmul(a, b):
xp = np
if is_cupy_type(a):
# This branch appears to be unnecessary since cupy
# version 9.0. See the following link:
# https://github.com/dask/dask/pull/8423#discussion_r768291271
# But it remains here for backward-compatibility.
# Consider removing it in a future version of dask.
import cupy
xp = cupy
chunk = xp.matmul(a, b)
# Since we have performed the contraction via xp.matmul
# but blockwise expects all dimensions back (including
# the contraction-axis in the 2nd-to-last position of
# the output), we must then put it back in the expected
# the position ourselves:
return chunk[..., xp.newaxis, :]
@derived_from(np)
def matmul(a, b):
a = asanyarray(a)
b = asanyarray(b)
if a.ndim == 0 or b.ndim == 0:
raise ValueError("`matmul` does not support scalars.")
a_is_1d = False
if a.ndim == 1:
a_is_1d = True
a = a[np.newaxis, :]
b_is_1d = False
if b.ndim == 1:
b_is_1d = True
b = b[:, np.newaxis]
if a.ndim < b.ndim:
a = a[(b.ndim - a.ndim) * (np.newaxis,)]
elif a.ndim > b.ndim:
b = b[(a.ndim - b.ndim) * (np.newaxis,)]
# out_ind includes all dimensions to prevent contraction
# in the blockwise below. We set the last two dimensions
# of the output to the contraction axis and the 2nd
# (last) dimension of b in that order
out_ind = tuple(range(a.ndim + 1))
# lhs_ind includes `a`/LHS dimensions
lhs_ind = tuple(range(a.ndim))
# on `b`/RHS everything above 2nd dimension, is the same
# as `a`, -2 dimension is "contracted" with the last dimension
# of `a`, last dimension of `b` is `b` specific
rhs_ind = tuple(range(a.ndim - 2)) + (lhs_ind[-1], a.ndim)
out = blockwise(
_matmul,
out_ind,
a,
lhs_ind,
b,
rhs_ind,
adjust_chunks={lhs_ind[-1]: 1},
dtype=result_type(a, b),
concatenate=False,
)
# Because contraction + concatenate in blockwise leads to high
# memory footprints, we want to avoid them. Instead we will perform
# blockwise (without contraction) followed by reduction. More about
# this issue: https://github.com/dask/dask/issues/6874
# We will also perform the reduction without concatenation
out = _sum_wo_cat(out, axis=-2)
if a_is_1d:
out = out.squeeze(-2)
if b_is_1d:
out = out.squeeze(-1)
return out
@derived_from(np)
def outer(a, b):
a = a.flatten()
b = b.flatten()
dtype = np.outer(a.dtype.type(), b.dtype.type()).dtype
return blockwise(np.outer, "ij", a, "i", b, "j", dtype=dtype)
def _inner_apply_along_axis(arr, func1d, func1d_axis, func1d_args, func1d_kwargs):
return np.apply_along_axis(func1d, func1d_axis, arr, *func1d_args, **func1d_kwargs)
@derived_from(np)
def apply_along_axis(func1d, axis, arr, *args, dtype=None, shape=None, **kwargs):
arr = asarray(arr)
# Verify that axis is valid and throw an error otherwise
axis = len(arr.shape[:axis])
# If necessary, infer dtype and shape of the output of func1d by calling it on test data.
if shape is None or dtype is None:
test_data = np.ones((1,), dtype=arr.dtype)
test_result = np.array(func1d(test_data, *args, **kwargs))
if shape is None:
shape = test_result.shape
if dtype is None:
dtype = test_result.dtype
# Rechunk so that func1d is applied over the full axis.
arr = arr.rechunk(
arr.chunks[:axis] + (arr.shape[axis : axis + 1],) + arr.chunks[axis + 1 :]
)
# Map func1d over the data to get the result
# Adds other axes as needed.
result = arr.map_blocks(
_inner_apply_along_axis,
name=funcname(func1d) + "-along-axis",
dtype=dtype,
chunks=(arr.chunks[:axis] + shape + arr.chunks[axis + 1 :]),
drop_axis=axis,
new_axis=list(range(axis, axis + len(shape), 1)),
func1d=func1d,
func1d_axis=axis,
func1d_args=args,
func1d_kwargs=kwargs,
)
return result
@derived_from(np)
def apply_over_axes(func, a, axes):
# Validate arguments
a = asarray(a)
try:
axes = tuple(axes)
except TypeError:
axes = (axes,)
sl = a.ndim * (slice(None),)
# Compute using `apply_along_axis`.
result = a
for i in axes:
result = apply_along_axis(func, i, result, 0)
# Restore original dimensionality or error.
if result.ndim == (a.ndim - 1):
result = result[sl[:i] + (None,)]
elif result.ndim != a.ndim:
raise ValueError(
"func must either preserve dimensionality of the input"
" or reduce it by one."
)
return result
@derived_from(np)
def ptp(a, axis=None):
return a.max(axis=axis) - a.min(axis=axis)
@derived_from(np)
def diff(a, n=1, axis=-1, prepend=None, append=None):
a = asarray(a)
n = int(n)
axis = int(axis)
if n == 0:
return a
if n < 0:
raise ValueError("order must be non-negative but got %d" % n)
combined = []
if prepend is not None:
prepend = asarray_safe(prepend, like=meta_from_array(a))
if prepend.ndim == 0:
shape = list(a.shape)
shape[axis] = 1
prepend = broadcast_to(prepend, tuple(shape))
combined.append(prepend)
combined.append(a)
if append is not None:
append = asarray_safe(append, like=meta_from_array(a))
if append.ndim == 0:
shape = list(a.shape)
shape[axis] = 1
append = np.broadcast_to(append, tuple(shape))
combined.append(append)
if len(combined) > 1:
a = concatenate(combined, axis)
sl_1 = a.ndim * [slice(None)]
sl_2 = a.ndim * [slice(None)]
sl_1[axis] = slice(1, None)
sl_2[axis] = slice(None, -1)
sl_1 = tuple(sl_1)
sl_2 = tuple(sl_2)
r = a
for i in range(n):
r = r[sl_1] - r[sl_2]
return r
@derived_from(np)
def ediff1d(ary, to_end=None, to_begin=None):
ary = asarray(ary)
aryf = ary.flatten()
r = aryf[1:] - aryf[:-1]
r = [r]
if to_begin is not None:
r = [asarray(to_begin).flatten()] + r
if to_end is not None:
r = r + [asarray(to_end).flatten()]
r = concatenate(r)
return r
def _gradient_kernel(x, block_id, coord, axis, array_locs, grad_kwargs):
block_loc = block_id[axis]
if array_locs is not None:
coord = coord[array_locs[0][block_loc] : array_locs[1][block_loc]]
grad = np.gradient(x, coord, axis=axis, **grad_kwargs)
return grad
@derived_from(np)
def gradient(f, *varargs, axis=None, **kwargs):
f = asarray(f)
kwargs["edge_order"] = math.ceil(kwargs.get("edge_order", 1))
if kwargs["edge_order"] > 2:
raise ValueError("edge_order must be less than or equal to 2.")
drop_result_list = False
if axis is None:
axis = tuple(range(f.ndim))
elif isinstance(axis, Integral):
drop_result_list = True
axis = (axis,)
axis = validate_axis(axis, f.ndim)
if len(axis) != len(set(axis)):
raise ValueError("duplicate axes not allowed")
axis = tuple(ax % f.ndim for ax in axis)
if varargs == ():
varargs = (1,)
if len(varargs) == 1:
varargs = len(axis) * varargs
if len(varargs) != len(axis):
raise TypeError(
"Spacing must either be a single scalar, or a scalar / 1d-array per axis"
)
if issubclass(f.dtype.type, (np.bool8, Integral)):
f = f.astype(float)
elif issubclass(f.dtype.type, Real) and f.dtype.itemsize < 4:
f = f.astype(float)
results = []
for i, ax in enumerate(axis):
for c in f.chunks[ax]:
if np.min(c) < kwargs["edge_order"] + 1:
raise ValueError(
"Chunk size must be larger than edge_order + 1. "
"Minimum chunk for axis {} is {}. Rechunk to "
"proceed.".format(ax, np.min(c))
)
if np.isscalar(varargs[i]):
array_locs = None
else:
if isinstance(varargs[i], Array):
raise NotImplementedError("dask array coordinated is not supported.")
# coordinate position for each block taking overlap into account
chunk = np.array(f.chunks[ax])
array_loc_stop = np.cumsum(chunk) + 1
array_loc_start = array_loc_stop - chunk - 2
array_loc_stop[-1] -= 1
array_loc_start[0] = 0
array_locs = (array_loc_start, array_loc_stop)
results.append(
f.map_overlap(
_gradient_kernel,
dtype=f.dtype,
depth={j: 1 if j == ax else 0 for j in range(f.ndim)},
boundary="none",
coord=varargs[i],
axis=ax,
array_locs=array_locs,
grad_kwargs=kwargs,
)
)
if drop_result_list:
results = results[0]
return results
def _bincount_agg(bincounts, dtype, **kwargs):
if not isinstance(bincounts, list):
return bincounts
n = max(map(len, bincounts))
out = np.zeros_like(bincounts[0], shape=n, dtype=dtype)
for b in bincounts:
out[: len(b)] += b
return out
@derived_from(np)
def bincount(x, weights=None, minlength=0, split_every=None):
if x.ndim != 1:
raise ValueError("Input array must be one dimensional. Try using x.ravel()")
if weights is not None:
if weights.chunks != x.chunks:
raise ValueError("Chunks of input array x and weights must match.")
token = tokenize(x, weights, minlength)
args = [x, "i"]
if weights is not None:
meta = array_safe(np.bincount([1], weights=[1]), like=meta_from_array(x))
args.extend([weights, "i"])
else:
meta = array_safe(np.bincount([]), like=meta_from_array(x))
if minlength == 0:
output_size = (np.nan,)
else:
output_size = (minlength,)
chunked_counts = blockwise(
partial(np.bincount, minlength=minlength), "i", *args, token=token, meta=meta
)
chunked_counts._chunks = (
output_size * len(chunked_counts.chunks[0]),
*chunked_counts.chunks[1:],
)
from dask.array.reductions import _tree_reduce
output = _tree_reduce(
chunked_counts,
aggregate=partial(_bincount_agg, dtype=meta.dtype),
axis=(0,),
keepdims=True,
dtype=meta.dtype,
split_every=split_every,
concatenate=False,
)
output._chunks = (output_size, *chunked_counts.chunks[1:])
output._meta = meta
return output
@derived_from(np)
def digitize(a, bins, right=False):
bins = asarray_safe(bins, like=meta_from_array(a))
dtype = np.digitize(asarray_safe([0], like=bins), bins, right=False).dtype
return a.map_blocks(np.digitize, dtype=dtype, bins=bins, right=right)
def _searchsorted_block(x, y, side):
res = np.searchsorted(x, y, side=side)
# 0 is only correct for the first block of a, but blockwise doesn't have a way
res[res == 0] = -1
return res[np.newaxis, :]
@derived_from(np)
def searchsorted(a, v, side="left", sorter=None):
if a.ndim != 1:
raise ValueError("Input array a must be one dimensional")
if sorter is not None:
raise NotImplementedError(
"da.searchsorted with a sorter argument is not supported"
)
meta = np.searchsorted(a._meta, v._meta)
out = blockwise(
_searchsorted_block,
list(range(v.ndim + 1)),
a,
[0],
v,
list(range(1, v.ndim + 1)),
side,
None,
meta=meta,
adjust_chunks={0: 1},
)
a_chunk_sizes = array_safe((0, *a.chunks[0]), like=meta_from_array(a))
a_chunk_offsets = np.cumsum(a_chunk_sizes)[:-1]
a_chunk_offsets = a_chunk_offsets[(Ellipsis,) + v.ndim * (np.newaxis,)]
a_offsets = asarray(a_chunk_offsets, chunks=1)
out = where(out < 0, out, out + a_offsets)
out = out.max(axis=0)
out[out == -1] = 0
return out
def _linspace_from_delayed(start, stop, num=50):
linspace_name = "linspace-" + tokenize(start, stop, num)
(start_ref, stop_ref, num_ref), deps = unpack_collections([start, stop, num])
if len(deps) == 0:
return np.linspace(start, stop, num=num)
linspace_dsk = {(linspace_name, 0): (np.linspace, start_ref, stop_ref, num_ref)}
linspace_graph = HighLevelGraph.from_collections(
linspace_name, linspace_dsk, dependencies=deps
)
chunks = ((np.nan,),) if is_dask_collection(num) else ((num,),)
return Array(linspace_graph, linspace_name, chunks, dtype=float)
def _block_hist(x, bins, range=None, weights=None):
return np.histogram(x, bins, range=range, weights=weights)[0][np.newaxis]
def histogram(a, bins=None, range=None, normed=False, weights=None, density=None):
    """Blocked variant of :func:`numpy.histogram`.

    Parameters mirror NumPy's: ``bins`` may be a scalar bin count (in which
    case ``range`` is required) or a sequence/array of bin edges, and
    ``weights``, when given, must share ``a``'s chunk structure.  Returns a
    ``(hist, bin_edges)`` pair where ``hist`` is a dask array of per-bin
    counts (or densities, when ``density=True``).

    Raises
    ------
    ValueError
        If ``bins``/``range``/``weights`` are inconsistent, or ``normed``
        is used (it is deprecated in favour of ``density``).
    TypeError
        If a dask collection other than Array/Delayed is passed for
        ``bins``, ``range`` or ``weights``.
    """
    if isinstance(bins, Array):
        scalar_bins = bins.ndim == 0
        # ^ `np.ndim` is not implemented by Dask array.
    elif isinstance(bins, Delayed):
        scalar_bins = bins._length is None or bins._length == 1
    else:
        scalar_bins = np.ndim(bins) == 0
    if bins is None or (scalar_bins and range is None):
        raise ValueError(
            "dask.array.histogram requires either specifying "
            "bins as an iterable or specifying both a range and "
            "the number of bins"
        )
    if weights is not None and weights.chunks != a.chunks:
        raise ValueError("Input array and weights must have the same chunked structure")
    if normed is not False:
        raise ValueError(
            "The normed= keyword argument has been deprecated. "
            "Please use density instead. "
            "See the numpy.histogram docstring for more information."
        )
    if density and scalar_bins and isinstance(bins, (Array, Delayed)):
        raise NotImplementedError(
            "When `density` is True, `bins` cannot be a scalar Dask object. "
            "It must be a concrete number or a (possibly-delayed) array/sequence of bin edges."
        )
    for argname, val in [("bins", bins), ("range", range), ("weights", weights)]:
        # BUG FIX: this previously tested ``bins`` on every iteration, so
        # unsupported dask collections passed as ``range`` or ``weights``
        # slipped through unreported.  Check the loop variable instead.
        if not isinstance(val, (Array, Delayed)) and is_dask_collection(val):
            raise TypeError(
                "Dask types besides Array and Delayed are not supported "
                "for `histogram`. For argument `{}`, got: {!r}".format(argname, val)
            )
    if range is not None:
        try:
            if len(range) != 2:
                raise ValueError(
                    f"range must be a sequence or array of length 2, but got {len(range)} items"
                )
            if isinstance(range, (Array, np.ndarray)) and range.shape != (2,):
                raise ValueError(
                    f"range must be a 1-dimensional array of two items, but got an array of shape {range.shape}"
                )
        except TypeError:
            raise TypeError(
                f"Expected a sequence or array for range, not {range}"
            ) from None
    token = tokenize(a, bins, range, weights, density)
    name = "histogram-sum-" + token
    if scalar_bins:
        bins = _linspace_from_delayed(range[0], range[1], bins + 1)
        # ^ NOTE `range[1]` is safe because of the above check, and the initial check
        # that range must not be None if `scalar_bins`
    else:
        if not isinstance(bins, (Array, np.ndarray)):
            bins = asarray(bins)
        if bins.ndim != 1:
            raise ValueError(
                f"bins must be a 1-dimensional array or sequence, got shape {bins.shape}"
            )
    (bins_ref, range_ref), deps = unpack_collections([bins, range])
    # Map the histogram to all bins, forming a 2D array of histograms, stacked for each chunk
    if weights is None:
        dsk = {
            (name, i, 0): (_block_hist, k, bins_ref, range_ref)
            for i, k in enumerate(flatten(a.__dask_keys__()))
        }
        dtype = np.histogram([])[0].dtype
    else:
        a_keys = flatten(a.__dask_keys__())
        w_keys = flatten(weights.__dask_keys__())
        dsk = {
            (name, i, 0): (_block_hist, k, bins_ref, range_ref, w)
            for i, (k, w) in enumerate(zip(a_keys, w_keys))
        }
        dtype = weights.dtype
    deps = (a,) + deps
    if weights is not None:
        deps += (weights,)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)
    # Turn graph into a 2D Array of shape (nchunks, nbins)
    nchunks = len(list(flatten(a.__dask_keys__())))
    nbins = bins.size - 1  # since `bins` is 1D
    chunks = ((1,) * nchunks, (nbins,))
    mapped = Array(graph, name, chunks, dtype=dtype)
    # Sum over chunks to get the final histogram
    n = mapped.sum(axis=0)
    # We need to replicate normed and density options from numpy
    if density is not None:
        if density:
            # Normalize by bin width so the integral over the range is 1.
            db = asarray(np.diff(bins).astype(float), chunks=n.chunks)
            return n / db / n.sum(), bins
        else:
            return n, bins
    else:
        return n, bins
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, density=None):
    """Blocked 2D histogram; thin wrapper around :func:`histogramdd`.

    Returns ``(counts, x_edges, y_edges)``, mirroring ``numpy.histogram2d``.
    """
    counts, edges = histogramdd(
        (x, y), bins=bins, range=range, normed=normed, weights=weights, density=density
    )
    x_edges, y_edges = edges[0], edges[1]
    return counts, x_edges, y_edges
def _block_histogramdd_rect(sample, bins, range, weights):
return np.histogramdd(sample, bins, range=range, weights=weights)[0:1]
def _block_histogramdd_multiarg(*args):
bins, range, weights = args[-3:]
sample = args[:-3]
return np.histogramdd(sample, bins=bins, range=range, weights=weights)[0:1]
def histogramdd(sample, bins, range=None, normed=None, weights=None, density=None):
    """Blocked variant of ``numpy.histogramdd``.

    ``sample`` is either a 2D (N, D) dask array chunked only along axis 0,
    or a sequence of D identically-chunked 1D dask arrays.  ``bins`` and
    ``range`` must be concrete (non-dask) values; ``weights`` may be a dask
    array chunked like the sample.  Returns ``(counts, edges)``.
    """
    # logic used in numpy.histogramdd to handle normed/density.
    if normed is None:
        if density is None:
            density = False
    elif density is None:
        # an explicit normed argument was passed, alias it to the new name
        density = normed
    else:
        raise TypeError("Cannot specify both 'normed' and 'density'")
    # check if any dask collections (dc) were passed to bins= or
    # range= these are unsupported.
    dc_bins = is_dask_collection(bins)
    if isinstance(bins, (list, tuple)):
        dc_bins = dc_bins or any([is_dask_collection(b) for b in bins])
    dc_range = (
        any([is_dask_collection(r) for r in range]) if range is not None else False
    )
    if dc_bins or dc_range:
        raise NotImplementedError(
            "Passing dask collections to bins=... or range=... is not supported."
        )
    # generate token and name for task
    token = tokenize(sample, bins, range, weights, density)
    name = f"histogramdd-sum-{token}"
    # N == total number of samples
    # D == total number of dimensions
    if hasattr(sample, "shape"):
        if len(sample.shape) != 2:
            raise ValueError("Single array input to histogramdd should be columnar")
        else:
            _, D = sample.shape
        n_chunks = sample.numblocks[0]
        rectangular_sample = True
        # Require data to be chunked along the first axis only.
        if sample.shape[1:] != sample.chunksize[1:]:
            raise ValueError("Input array can only be chunked along the 0th axis.")
    elif isinstance(sample, (tuple, list)):
        rectangular_sample = False
        D = len(sample)
        n_chunks = sample[0].numblocks[0]
        for i in _range(1, D):
            if sample[i].chunks != sample[0].chunks:
                raise ValueError("All coordinate arrays must be chunked identically.")
    else:
        raise ValueError(
            "Incompatible sample. Must be a 2D array or a sequence of 1D arrays."
        )
    # Require only Array or Delayed objects for bins, range, and weights.
    # NOTE: previously this loop tested `bins` on every iteration, so
    # `range` and `weights` were never actually validated; check `val`.
    for argname, val in [("bins", bins), ("range", range), ("weights", weights)]:
        if not isinstance(val, (Array, Delayed)) and is_dask_collection(val):
            raise TypeError(
                "Dask types besides Array and Delayed are not supported "
                "for `histogramdd`. For argument `{}`, got: {!r}".format(argname, val)
            )
    # Require that the chunking of the sample and weights are compatible.
    if weights is not None:
        if rectangular_sample and weights.chunks[0] != sample.chunks[0]:
            raise ValueError(
                "Input array and weights must have the same shape "
                "and chunk structure along the first dimension."
            )
        elif not rectangular_sample and weights.numblocks[0] != n_chunks:
            raise ValueError(
                "Input arrays and weights must have the same shape "
                "and chunk structure."
            )
    # if bins is a list, tuple, then make sure the length is the same
    # as the number dimensions.
    if isinstance(bins, (list, tuple)):
        if len(bins) != D:
            raise ValueError(
                "The dimension of bins must be equal to the dimension of the sample."
            )
    # if range is defined, check that it's the right length and that each
    # entry is a (min, max) pair.
    if range is not None:
        if len(range) != D:
            raise ValueError(
                "range argument requires one entry, a min max pair, per dimension."
            )
        if not all(len(r) == 2 for r in range):
            raise ValueError("range argument should be a sequence of pairs")
    if isinstance(bins, int):
        bins = (bins,) * D
    # Integer bin counts + ranges define uniform edges; otherwise the bins
    # themselves are the edge arrays.
    if all(isinstance(b, int) for b in bins) and all(len(r) == 2 for r in range):
        edges = [np.linspace(r[0], r[1], b + 1) for b, r in zip(bins, range)]
    else:
        edges = [np.asarray(b) for b in bins]
    if rectangular_sample:
        deps = (sample,)
    else:
        deps = tuple(sample)
    if weights is not None:
        w_keys = flatten(weights.__dask_keys__())
        deps += (weights,)
        dtype = weights.dtype
    else:
        w_keys = (None,) * n_chunks
        dtype = np.histogramdd([])[0].dtype
    # One histogram per sample chunk; the per-chunk results form a
    # (n_chunks, *nbins) array that is summed over axis 0 below.
    column_zeros = tuple(0 for _ in _range(D))
    if rectangular_sample:
        sample_keys = flatten(sample.__dask_keys__())
        dsk = {
            (name, i, *column_zeros): (_block_histogramdd_rect, k, bins, range, w)
            for i, (k, w) in enumerate(zip(sample_keys, w_keys))
        }
    else:
        sample_keys = [
            list(flatten(sample[i].__dask_keys__())) for i in _range(len(sample))
        ]
        fused_on_chunk_keys = [
            tuple(sample_keys[j][i] for j in _range(D)) for i in _range(n_chunks)
        ]
        dsk = {
            (name, i, *column_zeros): (
                _block_histogramdd_multiarg,
                *(*k, bins, range, w),
            )
            for i, (k, w) in enumerate(zip(fused_on_chunk_keys, w_keys))
        }
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=deps)
    all_nbins = tuple((b.size - 1,) for b in edges)
    stacked_chunks = ((1,) * n_chunks, *all_nbins)
    mapped = Array(graph, name, stacked_chunks, dtype=dtype)
    n = mapped.sum(axis=0)
    if density:
        # Normalize by the D-dimensional bin volume and the total count.
        width_divider = np.ones(n.shape)
        for i in _range(D):
            shape = np.ones(D, int)
            shape[i] = width_divider.shape[i]
            width_divider *= np.diff(edges[i]).reshape(shape)
        width_divider = asarray(width_divider, chunks=n.chunks)
        return n / width_divider / n.sum(), edges
    return n, [asarray(entry) for entry in edges]
@derived_from(np)
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
    # Estimate the covariance matrix; structured after numpy.cov.
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be integer")
    m = asarray(m)
    # Results are at least float64, matching numpy's promotion rules.
    if y is None:
        dtype = np.result_type(m, np.float64)
    else:
        y = asarray(y)
        dtype = np.result_type(m, y, np.float64)
    X = array(m, ndmin=2, dtype=dtype)
    if X.shape[0] == 1:
        rowvar = 1
    # rowvar=1: variables are rows, observations are columns.
    if rowvar:
        N = X.shape[1]
        axis = 0
    else:
        N = X.shape[0]
        axis = 1
    # Default degrees-of-freedom correction: 1 unless bias was requested.
    if ddof is None:
        if bias == 0:
            ddof = 1
        else:
            ddof = 0
    fact = float(N - ddof)
    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0
    if y is not None:
        y = array(y, ndmin=2, dtype=dtype)
        X = concatenate((X, y), axis)
    # Center each variable around its mean before the outer product.
    X = X - X.mean(axis=1 - axis, keepdims=True)
    if not rowvar:
        return (dot(X.T, X.conj()) / fact).squeeze()
    else:
        return (dot(X, X.T.conj()) / fact).squeeze()
@derived_from(np)
def corrcoef(x, y=None, rowvar=1):
    # Normalize the covariance matrix by the outer product of the
    # per-variable standard deviations.
    c = cov(x, y, rowvar)
    if c.shape == ():
        # 0-d covariance: correlation of a variable with itself is 1.
        return c / c
    stddev = sqrt(diag(c).reshape((-1, 1)))
    return (c / stddev) / stddev.T
@implements(np.round, np.round_)
@derived_from(np)
def round(a, decimals=0):
    # Rounding is elementwise: apply numpy's round chunk-by-chunk; the
    # dtype is unchanged by rounding.
    rounded = a.map_blocks(np.round, decimals=decimals, dtype=a.dtype)
    return rounded
@implements(np.ndim)
@derived_from(np)
def ndim(a):
    # Delegate to the array's own rank attribute.
    rank = a.ndim
    return rank
@implements(np.iscomplexobj)
@derived_from(np)
def iscomplexobj(x):
    # True when the array's scalar type is any complex floating kind.
    scalar_type = x.dtype.type
    return issubclass(scalar_type, np.complexfloating)
def _unique_internal(ar, indices, counts, return_inverse=False):
return_index = indices is not None
return_counts = counts is not None
u = np.unique(ar)
dt = [("values", u.dtype)]
if return_index:
dt.append(("indices", np.intp))
if return_inverse:
dt.append(("inverse", np.intp))
if return_counts:
dt.append(("counts", np.intp))
r = np.empty(u.shape, dtype=dt)
r["values"] = u
if return_inverse:
r["inverse"] = np.arange(len(r), dtype=np.intp)
if return_index or return_counts:
for i, v in enumerate(r["values"]):
m = ar == v
if return_index:
indices[m].min(keepdims=True, out=r["indices"][i : i + 1])
if return_counts:
counts[m].sum(keepdims=True, out=r["counts"][i : i + 1])
return r
def unique_no_structured_arr(
    ar, return_index=False, return_inverse=False, return_counts=False
):
    """Fallback ``unique`` for array types without structured-dtype support.

    Only the unique values can be computed on this path; any extra output
    request is rejected up front.
    """
    # can only compute values at the moment.
    if (
        return_index is not False
        or return_inverse is not False
        or return_counts is not False
    ):
        raise ValueError(
            "dask.array.unique does not support `return_index`, `return_inverse` "
            "or `return_counts` with array types that don't support structured "
            "arrays."
        )
    ar = ar.ravel()
    # Per-chunk unique; result chunk sizes become unknown (nan).
    args = [ar, "i"]
    meta = meta_from_array(ar)
    out = blockwise(np.unique, "i", *args, meta=meta)
    out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)
    out_parts = [out]
    # Aggregate: concatenate all per-chunk uniques and run unique once more.
    name = "unique-aggregate-" + out.name
    dsk = {
        (name, 0): (
            (np.unique,)
            + tuple(
                (np.concatenate, o.__dask_keys__())
                if hasattr(o, "__dask_keys__")
                else o
                for o in out_parts
            )
        )
    }
    dependencies = [o for o in out_parts if hasattr(o, "__dask_keys__")]
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    chunks = ((np.nan,),)
    out = Array(graph, name, chunks, meta=meta)
    # Single-output path; the tuple branch mirrors the structured variant.
    result = [out]
    if len(result) == 1:
        result = result[0]
    else:
        result = tuple(result)
    return result
@derived_from(np)
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
    # Probe whether the array backend supports structured dtypes; if not,
    # fall back to the values-only
    # `unique_no_structured_arr` implementation, otherwise (e.g., NumPy) just
    # continue as normal.
    try:
        meta = meta_from_array(ar)
        np.empty_like(meta, dtype=[("a", int), ("b", float)])
    except TypeError:
        return unique_no_structured_arr(
            ar,
            return_index=return_index,
            return_inverse=return_inverse,
            return_counts=return_counts,
        )
    ar = ar.ravel()
    # Run unique on each chunk and collect results in a Dask Array of
    # unknown size.
    args = [ar, "i"]
    out_dtype = [("values", ar.dtype)]
    # Companion arrays: global positions (for indices) and ones (for counts),
    # chunked to match the data so each block sees its own slice.
    if return_index:
        args.extend([arange(ar.shape[0], dtype=np.intp, chunks=ar.chunks[0]), "i"])
        out_dtype.append(("indices", np.intp))
    else:
        args.extend([None, None])
    if return_counts:
        args.extend([ones((ar.shape[0],), dtype=np.intp, chunks=ar.chunks[0]), "i"])
        out_dtype.append(("counts", np.intp))
    else:
        args.extend([None, None])
    out = blockwise(_unique_internal, "i", *args, dtype=out_dtype, return_inverse=False)
    out._chunks = tuple((np.nan,) * len(c) for c in out.chunks)
    # Take the results from the unique chunks and do the following.
    #
    # 1. Collect all results as arguments.
    # 2. Concatenate each result into one big array.
    # 3. Pass all results as arguments to the internal unique again.
    #
    # TODO: This should be replaced with a tree reduction using this strategy.
    # xref: https://github.com/dask/dask/issues/2851
    out_parts = [out["values"]]
    if return_index:
        out_parts.append(out["indices"])
    else:
        out_parts.append(None)
    if return_counts:
        out_parts.append(out["counts"])
    else:
        out_parts.append(None)
    name = "unique-aggregate-" + out.name
    dsk = {
        (name, 0): (
            (_unique_internal,)
            + tuple(
                (np.concatenate, o.__dask_keys__())
                if hasattr(o, "__dask_keys__")
                else o
                for o in out_parts
            )
            + (return_inverse,)
        )
    }
    out_dtype = [("values", ar.dtype)]
    if return_index:
        out_dtype.append(("indices", np.intp))
    if return_inverse:
        out_dtype.append(("inverse", np.intp))
    if return_counts:
        out_dtype.append(("counts", np.intp))
    dependencies = [o for o in out_parts if hasattr(o, "__dask_keys__")]
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    chunks = ((np.nan,),)
    out = Array(graph, name, chunks, out_dtype)
    # Split out all results to return to the user.
    result = [out["values"]]
    if return_index:
        result.append(out["indices"])
    if return_inverse:
        # Using the returned unique values and arange of unknown length, find
        # each value matching a unique value and replace it with its
        # corresponding index or `0`. There should be only one entry for this
        # index in axis `1` (the one of unknown length). Reduce axis `1`
        # through summing to get an array with known dimensionality and the
        # mapping of the original values.
        mtches = (ar[:, None] == out["values"][None, :]).astype(np.intp)
        result.append((mtches * out["inverse"]).sum(axis=1))
    if return_counts:
        result.append(out["counts"])
    if len(result) == 1:
        result = result[0]
    else:
        result = tuple(result)
    return result
def _isin_kernel(element, test_elements, assume_unique=False):
values = np.in1d(element.ravel(), test_elements, assume_unique=assume_unique)
return values.reshape(element.shape + (1,) * test_elements.ndim)
@safe_wraps(getattr(np, "isin", None))
def isin(element, test_elements, assume_unique=False, invert=False):
    element = asarray(element)
    test_elements = asarray(test_elements)
    # Give each input its own disjoint blockwise index set so every
    # (element chunk, test chunk) pair is evaluated.
    element_axes = tuple(range(element.ndim))
    test_axes = tuple(i + element.ndim for i in range(test_elements.ndim))
    mapped = blockwise(
        _isin_kernel,
        element_axes + test_axes,
        element,
        element_axes,
        test_elements,
        test_axes,
        # The kernel collapses each test-axis chunk to length 1.
        adjust_chunks={axis: lambda _: 1 for axis in test_axes},
        dtype=bool,
        assume_unique=assume_unique,
    )
    # A value is "in" if it matched any test chunk.
    result = mapped.any(axis=test_axes)
    if invert:
        result = ~result
    return result
@derived_from(np)
def roll(array, shift, axis=None):
    result = array
    if axis is None:
        # numpy semantics: roll the flattened array, restore shape at the end.
        result = ravel(result)
        if not isinstance(shift, Integral):
            raise TypeError(
                "Expect `shift` to be an instance of Integral when `axis` is None."
            )
        shift = (shift,)
        axis = (0,)
    else:
        # Normalize scalars to 1-tuples (duck-typed via len()).
        try:
            len(shift)
        except TypeError:
            shift = (shift,)
        try:
            len(axis)
        except TypeError:
            axis = (axis,)
    if len(shift) != len(axis):
        raise ValueError("Must have the same number of shifts as axes.")
    for i, s in zip(axis, shift):
        # A positive roll by s is a split at -s (mod length) + swap of halves.
        s = -s
        s %= result.shape[i]
        sl1 = result.ndim * [slice(None)]
        sl2 = result.ndim * [slice(None)]
        sl1[i] = slice(s, None)
        sl2[i] = slice(None, s)
        sl1 = tuple(sl1)
        sl2 = tuple(sl2)
        result = concatenate([result[sl1], result[sl2]], axis=i)
    result = result.reshape(array.shape)
    # Ensure that the output is always a new array object
    result = result.copy() if result is array else result
    return result
@derived_from(np)
def shape(array):
    # Delegate to the array's own shape tuple.
    dims = array.shape
    return dims
@derived_from(np)
def union1d(ar1, ar2):
    # The union of two arrays is the unique set of their flattened
    # concatenation.
    combined = concatenate((ar1.ravel(), ar2.ravel()))
    return unique(combined)
@derived_from(np)
def ravel(array_like):
    # Flatten to 1D; reshape with -1 infers the total length.
    arr = asanyarray(array_like)
    return arr.reshape((-1,))
@derived_from(np)
def expand_dims(a, axis):
    """Insert length-1 axes at the requested positions via a reshape."""
    if type(axis) not in (tuple, list):
        axis = (axis,)
    out_ndim = a.ndim + len(axis)
    axis = validate_axis(axis, out_ndim)
    # Walk the output positions: new axes get size 1, the rest consume the
    # original dimensions in order.
    src_dims = iter(a.shape)
    new_shape = []
    for pos in range(out_ndim):
        new_shape.append(1 if pos in axis else next(src_dims))
    return a.reshape(new_shape)
@derived_from(np)
def squeeze(a, axis=None):
    """Drop length-1 dimensions (all of them, or just those in ``axis``)."""
    if axis is None:
        # Default: squeeze every singleton dimension.
        axis = tuple(i for i, dim in enumerate(a.shape) if dim == 1)
    elif not isinstance(axis, tuple):
        axis = (axis,)
    if any(a.shape[i] != 1 for i in axis):
        raise ValueError("cannot squeeze axis with size other than one")
    axis = validate_axis(axis, a.ndim)
    # Index each squeezed axis with 0 and keep the rest whole.
    selector = tuple(0 if i in axis else slice(None) for i in range(a.ndim))
    return a[selector]
@derived_from(np)
def compress(condition, a, axis=None):
    """Select slices of ``a`` along ``axis`` where ``condition`` is true."""
    if not is_arraylike(condition):
        # Allow `condition` to be anything array-like, otherwise ensure
        # `condition` is a numpy array.
        condition = np.asarray(condition)
    condition = condition.astype(bool)
    a = asarray(a)
    if condition.ndim != 1:
        raise ValueError("Condition must be one dimensional")
    if axis is None:
        a = a.ravel()
        axis = 0
    axis = validate_axis(axis, a.ndim)
    # Treat a short condition as padded with False: first trim the axis to
    # the condition's length, then boolean-index along it.
    trimmed = tuple(
        slice(None, len(condition)) if dim == axis else slice(None)
        for dim in range(a.ndim)
    )
    picker = tuple(
        condition if dim == axis else slice(None) for dim in range(a.ndim)
    )
    return a[trimmed][picker]
@derived_from(np)
def extract(condition, arr):
    # Equivalent to ``compress`` on the flattened inputs, with the
    # condition coerced to boolean first.
    flat_cond = asarray(condition).astype(bool).ravel()
    flat_arr = asarray(arr).ravel()
    return compress(flat_cond, flat_arr)
@derived_from(np)
def take(a, indices, axis=0):
    """Take elements along ``axis``; fancy-indexing under the hood."""
    axis = validate_axis(axis, a.ndim)
    # numpy data indexed by a dask index array needs the dedicated path;
    # everything else reduces to slicing + fancy indexing.
    if isinstance(a, np.ndarray) and isinstance(indices, Array):
        return _take_dask_array_from_numpy(a, indices, axis)
    selector = (slice(None),) * axis + (indices,)
    return a[selector]
def _take_dask_array_from_numpy(a, indices, axis):
    """``np.take`` from an in-memory array driven by dask index blocks."""
    assert isinstance(a, np.ndarray)
    assert isinstance(indices, Array)

    def _pick(block):
        return np.take(a, block, axis)

    # Each index chunk maps to an equally-shaped chunk of gathered values.
    return indices.map_blocks(_pick, chunks=indices.chunks, dtype=a.dtype)
@derived_from(np)
def around(x, decimals=0):
    # Elementwise rounding, chunk by chunk.
    rounder = partial(np.around, decimals=decimals)
    return map_blocks(rounder, x, dtype=x.dtype)
def _asarray_isnull(values):
import pandas as pd
return np.asarray(pd.isnull(values))
def isnull(values):
    """Elementwise pandas-style null test for dask arrays."""
    # Import up front so a missing pandas fails immediately rather than
    # lazily inside a worker task.
    import pandas as pd  # noqa: F401

    return elemwise(_asarray_isnull, values, dtype="bool")
def notnull(values):
    """Elementwise complement of :func:`isnull`."""
    null_mask = isnull(values)
    return ~null_mask
@derived_from(np)
def isclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
    # Bind the tolerances into the numpy kernel, then broadcast it
    # elementwise over both inputs.
    kernel = partial(np.isclose, rtol=rtol, atol=atol, equal_nan=equal_nan)
    return elemwise(kernel, arr1, arr2, dtype="bool")
@derived_from(np)
def allclose(arr1, arr2, rtol=1e-5, atol=1e-8, equal_nan=False):
    # Reduce the elementwise closeness mask with a global AND.
    close = isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=equal_nan)
    return close.all()
def variadic_choose(a, *choices):
    """``np.choose`` adapted to take the choice arrays variadically."""
    return np.choose(a, list(choices))
@derived_from(np)
def choose(a, choices):
    # Splat the choice arrays so elemwise can align/broadcast each one.
    args = (a, *choices)
    return elemwise(variadic_choose, *args)
def _isnonzero_vec(v):
return bool(np.count_nonzero(v))
_isnonzero_vec = np.vectorize(_isnonzero_vec, otypes=[bool])
def isnonzero(a):
    # Elementwise truthiness test for dask arrays.
    # String dtypes cannot be cast to bool directly, so map the vectorized
    # scalar kernel over every block.
    if a.dtype.kind in {"U", "S"}:
        return a.map_blocks(_isnonzero_vec, dtype=bool)
    # Probe whether this dtype supports a plain bool cast.
    try:
        np.zeros(tuple(), dtype=a.dtype).astype(bool)
    except ValueError:
        # NOTE(review): this branch looks like a bad merge/splice —
        # `index_stack`, `dims`, `mode` and `order` are undefined here, and
        # the success path of the `try` falls through returning None.
        # Upstream dask falls back to ``a.map_blocks(_isnonzero_vec,
        # dtype=bool)`` here and otherwise returns ``a.astype(bool)``;
        # verify against the original source.
        return index_stack.map_blocks(
            np.ravel_multi_index,
            dtype=np.intp,
            chunks=index_stack.chunks[1:],
            drop_axis=0,
            dims=dims,
            mode=mode,
            order=order,
        )
def _int_piecewise(x, *condlist, **kwargs):
return np.piecewise(
x, list(condlist), kwargs["funclist"], *kwargs["func_args"], **kwargs["func_kw"]
)
@derived_from(np)
def piecewise(x, condlist, funclist, *args, **kw):
    # Conditions are mapped chunkwise alongside x; the function list and
    # extra arguments travel as a packed keyword payload to the kernel.
    packed = dict(funclist=funclist, func_args=args, func_kw=kw)
    return map_blocks(
        _int_piecewise, x, *condlist, dtype=x.dtype, name="piecewise", **packed
    )
def _select(*args, **kwargs):
split_at = len(args) // 2
condlist = args[:split_at]
choicelist = args[split_at:]
return np.select(condlist, choicelist, **kwargs)
@derived_from(np)
def select(condlist, choicelist, default=0):
    """Blocked ``np.select``."""
    if len(condlist) != len(choicelist):
        raise ValueError("list of cases must be same length as list of conditions")
    if len(condlist) == 0:
        raise ValueError("select with an empty condition list is not possible")
    choicelist = [asarray(choice) for choice in choicelist]
    try:
        intermediate_dtype = result_type(*choicelist)
    except TypeError as e:
        msg = "Choicelist elements do not have a common dtype."
        raise TypeError(msg) from e
    # Interleave every condition/choice with the shared index pattern so
    # blockwise aligns all of them chunk-for-chunk.
    blockwise_shape = tuple(range(choicelist[0].ndim))
    paired = []
    for elem in (*condlist, *choicelist):
        paired.extend((elem, blockwise_shape))
    return blockwise(
        _select,
        blockwise_shape,
        *paired,
        dtype=intermediate_dtype,
        name="select",
        default=default,
    )
def _partition(total: int, divisor: int) -> tuple[tuple[int, ...], tuple[int, ...]]:
multiples = (divisor,) * (total // divisor)
remainder = (total % divisor,) if total % divisor else ()
return multiples, remainder
def aligned_coarsen_chunks(chunks: list[int], multiple: int) -> tuple[int, ...]:
overflow = np.array(chunks) % multiple
excess = overflow.sum()
new_chunks = np.array(chunks) - overflow
chunk_validity = new_chunks == chunks
valid_inds, invalid_inds = np.where(chunk_validity)[0], np.where(~chunk_validity)[0]
chunk_modification_order = [
*invalid_inds[np.argsort(new_chunks[invalid_inds])],
*valid_inds[np.argsort(new_chunks[valid_inds])],
]
partitioned_excess, remainder = _partition(excess, multiple)
for idx, extra in enumerate(partitioned_excess):
new_chunks[chunk_modification_order[idx]] += extra
new_chunks = np.array([*new_chunks, *remainder])
new_chunks = new_chunks[new_chunks > 0]
return tuple(new_chunks)
@wraps(chunk.coarsen)
def coarsen(reduction, x, axes, trim_excess=False, **kwargs):
    # Downsample x by applying `reduction` over `axes[i]`-sized windows
    # along each listed axis.
    if not trim_excess and not all(x.shape[i] % div == 0 for i, div in axes.items()):
        msg = f"Coarsening factors {axes} do not align with array shape {x.shape}."
        raise ValueError(msg)
    # Dask reductions delegate to their numpy namesake inside each chunk.
    if reduction.__module__.startswith("dask."):
        reduction = getattr(np, reduction.__name__)
    # Rechunk so every chunk boundary lies on a coarsening-factor multiple.
    new_chunks = {}
    for i, div in axes.items():
        aligned = aligned_coarsen_chunks(x.chunks[i], div)
        if aligned != x.chunks[i]:
            new_chunks[i] = aligned
    if new_chunks:
        x = x.rechunk(new_chunks)
    # One coarsen task per chunk; chunk keys keep their block indices.
    name = "coarsen-" + tokenize(reduction, x, axes, trim_excess)
    dsk = {
        (name,)
        + key[1:]: (apply, chunk.coarsen, [reduction, key, axes, trim_excess], kwargs)
        for key in flatten(x.__dask_keys__())
    }
    # Output chunk sizes shrink by the per-axis coarsening factor.
    chunks = tuple(
        tuple(int(bd // axes.get(i, 1)) for bd in bds) for i, bds in enumerate(x.chunks)
    )
    meta = reduction(np.empty((1,) * x.ndim, dtype=x.dtype), **kwargs)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
    return Array(graph, name, chunks, meta=meta)
def split_at_breaks(array, breaks, axis=0):
    """Split ``array`` along ``axis`` at the given break indices.

    Returns the list of consecutive slabs, including the open-ended first
    and last pieces (``None`` bounds).
    """
    bounds = [None, *breaks, None]
    lead = (slice(None),) * axis
    pieces = [
        array[lead + (slice(lo, hi),)] for lo, hi in zip(bounds[:-1], bounds[1:])
    ]
    return pieces
@derived_from(np)
def insert(arr, obj, values, axis):
    axis = validate_axis(axis, arr.ndim)
    # Normalize `obj` to a 1-D array of non-negative insertion positions.
    if isinstance(obj, slice):
        obj = np.arange(*obj.indices(arr.shape[axis]))
    obj = np.asarray(obj)
    scalar_obj = obj.ndim == 0
    if scalar_obj:
        obj = np.atleast_1d(obj)
    obj = np.where(obj < 0, obj + arr.shape[axis], obj)
    if (np.diff(obj) < 0).any():
        raise NotImplementedError(
            "da.insert only implemented for monotonic ``obj`` argument"
        )
    # Cut the target array at every distinct insertion point.
    split_arr = split_at_breaks(arr, np.unique(obj), axis)
    if getattr(values, "ndim", 0) == 0:
        # Scalar values: wrap into a 0-d dask array and broadcast to one
        # slot per insertion position.
        name = "values-" + tokenize(values)
        dtype = getattr(values, "dtype", type(values))
        values = Array({(name,): values}, name, chunks=(), dtype=dtype)
        values_shape = tuple(
            len(obj) if axis == n else s for n, s in enumerate(arr.shape)
        )
        values = broadcast_to(values, values_shape)
    elif scalar_obj:
        values = values[(slice(None),) * axis + (None,)]
    # Match the values' chunking to the target on every axis but `axis`.
    values_chunks = tuple(
        values_bd if axis == n else arr_bd
        for n, (arr_bd, values_bd) in enumerate(zip(arr.chunks, values.chunks))
    )
    values = values.rechunk(values_chunks)
    # Group values that share an insertion point, then interleave the
    # array pieces with the value pieces and drop empty slabs.
    counts = np.bincount(obj)[:-1]
    values_breaks = np.cumsum(counts[counts > 0])
    split_values = split_at_breaks(values, values_breaks, axis)
    interleaved = list(interleave([split_arr, split_values]))
    interleaved = [i for i in interleaved if i.nbytes]
    return concatenate(interleaved, axis=axis)
@derived_from(np)
def delete(arr, obj, axis):
    axis = validate_axis(axis, arr.ndim)
    # Normalize `obj` to sorted, non-negative positions to remove.
    if isinstance(obj, slice):
        tmp = np.arange(*obj.indices(arr.shape[axis]))
        obj = tmp[::-1] if obj.step and obj.step < 0 else tmp
    else:
        obj = np.asarray(obj)
        obj = np.where(obj < 0, obj + arr.shape[axis], obj)
        obj = np.unique(obj)
    # Split at each deletion index; every piece after the first starts with
    # exactly one to-be-deleted element, so trim its leading slot.
    target_arr = split_at_breaks(arr, obj, axis)
    target_arr = [
        arr[
            tuple(slice(1, None) if axis == n else slice(None) for n in range(arr.ndim))
        ]
        if i != 0
        else arr
        for i, arr in enumerate(target_arr)
    ]
    return concatenate(target_arr, axis=axis)
@derived_from(np)
def append(arr, values, axis=None):
    """Append ``values`` to ``arr``; with ``axis=None`` both are flattened
    first, matching numpy's behaviour."""
    arr = asanyarray(arr)
    if axis is None:
        # Flatten both operands and append along the sole remaining axis.
        flat = arr if arr.ndim == 1 else arr.ravel()
        tail = ravel(asanyarray(values))
        return concatenate((flat, tail), axis=flat.ndim - 1)
    return concatenate((arr, values), axis=axis)
def _average(a, axis=None, weights=None, returned=False, is_masked=False):
    """Shared implementation behind ``average`` (and its masked variant).

    Returns the (weighted) average along ``axis``; when ``returned`` is
    True, also returns the sum of weights broadcast to the result shape.
    """
    a = asanyarray(a)
    if weights is None:
        # Unweighted: plain mean; the scale is the number of averaged items.
        avg = a.mean(axis)
        scl = avg.dtype.type(a.size / avg.size)
    else:
        wgt = asanyarray(weights)
        # Integer/bool data promotes to at least float64, like numpy.
        if issubclass(a.dtype.type, (np.integer, np.bool_)):
            result_dtype = result_type(a.dtype, wgt.dtype, "f8")
        else:
            result_dtype = result_type(a.dtype, wgt.dtype)
        if a.shape != wgt.shape:
            # Only 1-D weights along an explicit axis may differ in shape.
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights differ."
                )
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ."
                )
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis."
                )
            # Broadcast the 1-D weights across all other axes.
            wgt = broadcast_to(wgt, (a.ndim - 1) * (1,) + wgt.shape)
            wgt = wgt.swapaxes(-1, axis)
        if is_masked:
            from dask.array.ma import getmaskarray
            # Masked entries contribute zero weight.
            wgt = wgt * (~getmaskarray(a))
        scl = wgt.sum(axis=axis, dtype=result_dtype)
        avg = multiply(a, wgt, dtype=result_dtype).sum(axis) / scl
    if returned:
        if scl.shape != avg.shape:
            scl = broadcast_to(scl, avg.shape).copy()
        return avg, scl
    else:
        return avg
@derived_from(np)
def average(a, axis=None, weights=None, returned=False):
    # Unmasked front-end to the shared implementation.
    return _average(a, axis=axis, weights=weights, returned=returned, is_masked=False)
@derived_from(np)
def tril(m, k=0):
    """Zero out everything above the ``k``-th diagonal of the last two axes."""
    m = asarray_safe(m, like=m)
    # Boolean lower-triangle mask with the same trailing chunk structure.
    meta = meta_from_array(m) if _numpy_120 else None
    keep = tri(*m.shape[-2:], k=k, dtype=bool, chunks=m.chunks[-2:], like=meta)
    zero = np.zeros_like(m, shape=(1,))
    return where(keep, m, zero)
@derived_from(np)
def triu(m, k=0):
    """Zero out everything below the ``k``-th diagonal of the last two axes."""
    m = asarray_safe(m, like=m)
    # Mask of the strictly-lower triangle; those entries get replaced by 0.
    meta = meta_from_array(m) if _numpy_120 else None
    below = tri(*m.shape[-2:], k=k - 1, dtype=bool, chunks=m.chunks[-2:], like=meta)
    zero = np.zeros_like(m, shape=(1,))
    return where(below, zero, m)
@derived_from(np)
def tril_indices(n, k=0, m=None, chunks="auto"):
    # The lower-triangle indices are the nonzero positions of a boolean
    # ``tri`` mask.
    lower_mask = tri(n, m, k=k, dtype=bool, chunks=chunks)
    return nonzero(lower_mask)
@derived_from(np)
def tril_indices_from(arr, k=0):
    """Lower-triangle indices matching ``arr``'s shape and chunking."""
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    rows, cols = arr.shape[-2], arr.shape[-1]
    return tril_indices(rows, k=k, m=cols, chunks=arr.chunks)
@derived_from(np)
def triu_indices(n, k=0, m=None, chunks="auto"):
    # Upper-triangle indices: complement of the strictly-lower tri mask.
    strictly_lower = tri(n, m, k=k - 1, dtype=bool, chunks=chunks)
    return nonzero(~strictly_lower)
@derived_from(np)
def triu_indices_from(arr, k=0):
    """Upper-triangle indices matching ``arr``'s shape and chunking."""
    if arr.ndim != 2:
        raise ValueError("input array must be 2-d")
    rows, cols = arr.shape[-2], arr.shape[-1]
    return triu_indices(rows, k=k, m=cols, chunks=arr.chunks)
| true | true |
f7313e3ab3e8fc56b748beb5452013bcc48f9018 | 1,982 | py | Python | lifegame.py | cuboktahedron/Rascon | 7f754434424c6a0b5f61c96c33c5d2c4acf04a4c | [
"MIT"
] | null | null | null | lifegame.py | cuboktahedron/Rascon | 7f754434424c6a0b5f61c96c33c5d2c4acf04a4c | [
"MIT"
] | null | null | null | lifegame.py | cuboktahedron/Rascon | 7f754434424c6a0b5f61c96c33c5d2c4acf04a4c | [
"MIT"
] | null | null | null | import copy
from datetime import datetime
class LifeGame:
    """Conway's Game of Life on a toroidal (wrap-around) grid.

    ``next`` is rate-limited: calls arriving faster than ``fps`` steps per
    second are silently ignored, so it can be driven from a render loop.
    """

    def __init__(self, width, height, fps=2):
        """Create a dead ``width`` x ``height`` board.

        :param width: number of columns
        :param height: number of rows
        :param fps: maximum generation updates per second (default 2,
            matching the previously hard-coded rate)
        """
        self.__width = width
        self.__height = height
        # cells[y][x] is True when the cell at column x, row y is alive.
        self.__cells = [[False] * width for _ in range(height)]
        self.__fps = fps
        # Timestamp before which the next generation step is suppressed.
        self._next_ts = datetime.now().timestamp()

    def set(self, x, y, status):
        """Set the aliveness of the cell at (x, y)."""
        self.__cells[y][x] = status

    def get(self, x, y):
        """Return True when the cell at (x, y) is alive."""
        return self.__cells[y][x]

    def next(self):
        """Advance one generation, unless called faster than ``fps`` allows."""
        cur_ts = datetime.now().timestamp()
        if cur_ts < self._next_ts:
            return
        self._next_ts = cur_ts + (1 / self.__fps)
        self.__cells = [
            [self.__next_cell(x, y) for x in range(self.__width)]
            for y in range(self.__height)
        ]

    def __next_cell(self, x, y):
        """Apply Conway's rules to the cell at (x, y) for the next step."""
        neighbors = (
            self.__is_alive(x + dx, y + dy)
            for dx in (-1, 0, 1)
            for dy in (-1, 0, 1)
            if (dx, dy) != (0, 0)
        )
        alive_count = sum(1 for alive in neighbors if alive)
        if self.__cells[y][x]:
            # Survival: a live cell stays alive with 2 or 3 live neighbours.
            return 2 <= alive_count <= 3
        # Birth: a dead cell comes alive with exactly 3 live neighbours.
        return alive_count == 3

    def __is_alive(self, x, y):
        """Aliveness of (x, y) with toroidal wrap at the board edges.

        Uses modulo so arbitrary offsets wrap correctly; the previous edge
        clamp only handled coordinates one step outside the board.
        """
        x %= self.__width
        y %= self.__height
        return self.__cells[y][x]

    def __is_outer(self, x, y):
        """True when (x, y) lies outside the board (currently unused)."""
        return x < 0 or x >= self.__width or y < 0 or y >= self.__height

    def get_cells(self):
        """Return a deep copy of the board as a list of rows of bools."""
        return copy.deepcopy(self.__cells)
| 30.492308 | 95 | 0.519173 | import copy
from datetime import datetime
class LifeGame:
    """Conway's Game of Life on a grid whose edges wrap around.

    ``next`` throttles itself to at most ``__fps`` generations per second.
    """

    def __init__(self, width, height):
        self.__width = width
        self.__height = height
        # Board state: cells[y][x] is True for a live cell.
        self.__cells = [[False] * width for _ in range(height)]
        self.__fps = 2
        self._next_ts = datetime.now().timestamp()

    def set(self, x, y, status):
        """Assign aliveness ``status`` to the cell at (x, y)."""
        self.__cells[y][x] = status

    def get(self, x, y):
        """Report whether the cell at (x, y) is alive."""
        return self.__cells[y][x]

    def next(self):
        """Step one generation; no-op when called faster than the fps cap."""
        now = datetime.now().timestamp()
        if now < self._next_ts:
            return
        self._next_ts = now + 1 / self.__fps
        self.__cells = [
            [self.__next_cell(x, y) for x in range(self.__width)]
            for y in range(self.__height)
        ]

    def __next_cell(self, x, y):
        """Next-generation state of the cell at (x, y)."""
        offsets = (
            (-1, -1), (-1, 0), (-1, 1),
            (0, -1), (0, 1),
            (1, -1), (1, 0), (1, 1),
        )
        alive = sum(1 for dx, dy in offsets if self.__is_alive(x + dx, y + dy))
        if self.__cells[y][x]:
            return 2 <= alive <= 3  # survival rule
        return alive == 3  # birth rule

    def __is_alive(self, x, y):
        """Aliveness of (x, y), wrapping one step past each board edge."""
        if x < 0:
            x = self.__width - 1
        elif x >= self.__width:
            x = 0
        if y < 0:
            y = self.__height - 1
        elif y >= self.__height:
            y = 0
        return self.__cells[y][x]

    def __is_outer(self, x, y):
        """True when (x, y) lies outside the board (unused helper)."""
        return x < 0 or x >= self.__width or y < 0 or y >= self.__height

    def get_cells(self):
        """Deep copy of the full board."""
        return copy.deepcopy(self.__cells)
| true | true |
f7313e94c331bb92c55fd8f8212ee3abca3087ee | 4,287 | py | Python | dockermon.py | CyberInt/dockermon | a8733b9395cb1b551971f17c31d7f4a8268bb969 | [
"MIT"
] | 10 | 2015-06-27T06:06:01.000Z | 2021-02-15T04:04:02.000Z | dockermon.py | CyberInt/dockermon | a8733b9395cb1b551971f17c31d7f4a8268bb969 | [
"MIT"
] | 2 | 2015-08-09T14:10:25.000Z | 2016-05-14T09:25:43.000Z | dockermon.py | CyberInt/dockermon | a8733b9395cb1b551971f17c31d7f4a8268bb969 | [
"MIT"
] | 7 | 2016-02-03T03:24:09.000Z | 2021-02-15T04:08:40.000Z | #!/usr/bin/env python
"""docker monitor using docker /events HTTP streaming API"""
from contextlib import closing
from functools import partial
from socket import socket, AF_UNIX
from subprocess import Popen, PIPE
from sys import stdout, version_info
import json
import shlex
if version_info[:2] < (3, 0):
from httplib import OK as HTTP_OK
from urlparse import urlparse
else:
from http.client import OK as HTTP_OK
from urllib.parse import urlparse
__version__ = '0.2.2'
bufsize = 1024
default_sock_url = 'ipc:///var/run/docker.sock'
class DockermonError(Exception):
    """Raised when the docker daemon replies with an unexpected HTTP status."""
def read_http_header(sock):
    """Read the HTTP response header from ``sock``.

    Returns ``(header, rest)`` where ``rest`` is whatever body data was
    already received after the blank line.
    """
    separator = '\r\n\r\n'
    received = ''
    while True:
        received += sock.recv(bufsize).decode('utf-8')
        split_at = received.find(separator)
        if split_at != -1:
            return received[:split_at], received[split_at + len(separator):]
def header_status(header):
    """Parse the HTTP status line: 'HTTP/1.1 200 OK' -> (200, 'OK')."""
    line_end = header.find('\r')
    status_line = header[:line_end]
    parts = status_line.split(None, 2)
    return int(parts[1]), parts[2]
def connect(url):
    """Connect to UNIX or TCP socket.

    url can be either tcp://<host>:port or ipc://<path>

    Returns ``(sock, hostname)`` where hostname is suitable for the HTTP
    Host header.  Raises ValueError for an unknown scheme.
    """
    # Local import: the module-level ``from socket import socket`` shadows
    # the socket *module*, so the class has no ``gethostname`` attribute —
    # the previous ``socket.gethostname()`` raised AttributeError.
    from socket import gethostname

    url = urlparse(url)
    if url.scheme == 'tcp':
        sock = socket()
        host, port = url.netloc.rsplit(':', 1)
        # socket.connect requires the port as an int, not a string.
        netloc = (host, int(port))
        hostname = gethostname()
    elif url.scheme == 'ipc':
        sock = socket(AF_UNIX)
        netloc = url.path
        hostname = 'localhost'
    else:
        raise ValueError('unknown socket type: %s' % url.scheme)

    sock.connect(netloc)
    return sock, hostname
def watch(callback, url=default_sock_url):
    """Watch docker events. Will call callback with each new event (dict).

    url can be either tcp://<host>:port or ipc://<path>

    Streams the docker /events endpoint forever; raises DockermonError on a
    non-200 response and EOFError when the daemon closes the socket.
    """
    sock, hostname = connect(url)
    request = 'GET /events HTTP/1.1\nHost: %s\n\n' % hostname
    request = request.encode('utf-8')
    with closing(sock):
        sock.sendall(request)
        # Any body bytes received with the header are the start of the stream.
        header, payload = read_http_header(sock)
        status, reason = header_status(header)
        if status != HTTP_OK:
            raise DockermonError('bad HTTP status: %s %s' % (status, reason))
        # Messages are \r\n<size in hex><JSON payload>\r\n
        buf = [payload]
        while True:
            chunk = sock.recv(bufsize)
            if not chunk:
                raise EOFError('socket closed')
            buf.append(chunk.decode('utf-8'))
            data = ''.join(buf)
            # Wait until the hex size line terminator has arrived.
            i = data.find('\r\n')
            if i == -1:
                continue
            size = int(data[:i], 16)
            start = i + 2  # Skip initial \r\n
            # Keep buffering until the full payload plus trailer is present.
            if len(data) < start + size + 2:
                continue
            payload = data[start:start+size]
            callback(json.loads(payload))
            buf = [data[start+size+2:]]  # Skip \r\n suffix
def print_callback(msg):
    """Write ``msg`` to stdout as a single JSON line and flush."""
    line = json.dumps(msg)
    stdout.write(line)
    stdout.write('\n')
    stdout.flush()
def prog_callback(prog, msg):
    """Spawn ``prog`` and feed it ``msg`` as JSON on stdin."""
    child = Popen(prog, stdin=PIPE)
    payload = json.dumps(msg).encode('utf-8')
    child.stdin.write(payload)
    child.stdin.close()
if __name__ == '__main__':
    # Command-line entry point: stream docker events to stdout as JSON
    # lines, or pipe each event into an external program's stdin.
    from argparse import ArgumentParser

    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--prog', default=None,
                        help='program to call (e.g. "jq --unbuffered .")')
    parser.add_argument(
        '--socket-url', default=default_sock_url,
        help='socket url (ipc:///path/to/sock or tcp:///host:port)')
    parser.add_argument(
        '--version', help='print version and exit',
        action='store_true', default=False)
    args = parser.parse_args()
    if args.version:
        print('dockermon %s' % __version__)
        raise SystemExit
    # --prog runs one subprocess per event; otherwise print to stdout.
    if args.prog:
        prog = shlex.split(args.prog)
        callback = partial(prog_callback, prog)
    else:
        callback = print_callback
    try:
        watch(callback, args.socket_url)
    except (KeyboardInterrupt, EOFError):
        # Normal shutdown: user interrupt or daemon closed the stream.
        pass
| 28.203947 | 77 | 0.601586 |
from contextlib import closing
from functools import partial
from socket import socket, AF_UNIX
from subprocess import Popen, PIPE
from sys import stdout, version_info
import json
import shlex
if version_info[:2] < (3, 0):
from httplib import OK as HTTP_OK
from urlparse import urlparse
else:
from http.client import OK as HTTP_OK
from urllib.parse import urlparse
__version__ = '0.2.2'
bufsize = 1024
default_sock_url = 'ipc:///var/run/docker.sock'
class DockermonError(Exception):
    """Raised when the Docker daemon replies with an unexpected HTTP status."""
def read_http_header(sock):
    """Read from *sock* until the blank line ending the HTTP header.

    Returns a ``(header, leftover)`` pair: the raw header text without the
    terminating blank line, and any payload bytes already read past it.
    """
    terminator = '\r\n\r\n'
    received = ''
    while terminator not in received:
        received += sock.recv(bufsize).decode('utf-8')
    head, _, leftover = received.partition(terminator)
    return head, leftover
def header_status(header):
    """Return ``(status_code, reason_phrase)`` from an HTTP header blob.

    Only the first line is examined.  partition() is used instead of
    ``header[:header.find('\\r')]`` because find() returns -1 when the header
    has no CR at all, which silently dropped the last character of the line.
    """
    status_line, _, _ = header.partition('\r')
    fields = status_line.split(None, 2)
    # fields: ['HTTP/1.x', '<code>', '<reason ...>']
    return int(fields[1]), fields[2]
def connect(url):
    """Open a socket to the Docker daemon at *url*.

    *url* is either ``tcp://<host>:<port>`` or ``ipc://<path>``.  Returns
    ``(sock, hostname)`` where *hostname* is suitable for the HTTP ``Host:``
    header of the request sent over the socket.
    """
    url = urlparse(url)
    if url.scheme == 'tcp':
        sock = socket()
        host, _, port = url.netloc.rpartition(':')
        # connect() needs an int port; the URL hands it to us as a string.
        netloc = (host, int(port))
        # The Host: header must name the *target*, not this machine; the old
        # socket.gethostname() call was also an AttributeError, since `socket`
        # here is the class imported via `from socket import socket`.
        hostname = host
    elif url.scheme == 'ipc':
        sock = socket(AF_UNIX)
        netloc = url.path
        hostname = 'localhost'
    else:
        raise ValueError('unknown socket type: %s' % url.scheme)
    sock.connect(netloc)
    return sock, hostname
def watch(callback, url=default_sock_url):
    """Watch docker events; call *callback* with each decoded event (dict).

    *url* can be either tcp://<host>:<port> or ipc://<path>.  Blocks forever;
    raises DockermonError on a non-200 HTTP reply and EOFError when the
    daemon closes the connection.
    """
    sock, hostname = connect(url)
    request = 'GET /events HTTP/1.1\nHost: %s\n\n' % hostname
    request = request.encode('utf-8')
    with closing(sock):
        sock.sendall(request)
        header, payload = read_http_header(sock)
        status, reason = header_status(header)
        if status != HTTP_OK:
            raise DockermonError('bad HTTP status: %s %s' % (status, reason))
        # Chunked reply: each message is "<size in hex>\r\n<JSON payload>\r\n".
        buf = [payload]
        while True:
            chunk = sock.recv(bufsize)
            if not chunk:
                raise EOFError('socket closed')
            buf.append(chunk.decode('utf-8'))
            data = ''.join(buf)
            i = data.find('\r\n')
            if i == -1:
                continue  # size line not complete yet; read more
            size = int(data[:i], 16)
            start = i + 2  # skip the \r\n that terminates the size line
            if len(data) < start + size + 2:
                continue  # payload (plus trailing \r\n) not fully received
            payload = data[start:start+size]
            callback(json.loads(payload))
            buf = [data[start+size+2:]]  # keep leftover past the trailing \r\n
def print_callback(msg):
    """Serialize *msg* as JSON and emit it to stdout on its own line."""
    stdout.write(json.dumps(msg) + '\n')
    stdout.flush()
def prog_callback(prog, msg):
    """Run *prog* (an argv list) once with *msg* as JSON on its stdin.

    Waits for the child so it is reaped right away; otherwise each event
    would leave a zombie process until the parent exits.
    """
    pipe = Popen(prog, stdin=PIPE)
    data = json.dumps(msg)
    pipe.stdin.write(data.encode('utf-8'))
    pipe.stdin.close()
    pipe.wait()  # reap the child; prevents zombie accumulation
if __name__ == '__main__':
    # Command-line entry point: stream Docker events to stdout or to a program.
    from argparse import ArgumentParser
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--prog', default=None,
                        help='program to call (e.g. "jq --unbuffered .")')
    parser.add_argument(
        '--socket-url', default=default_sock_url,
        help='socket url (ipc:///path/to/sock or tcp:///host:port)')
    parser.add_argument(
        '--version', help='print version and exit',
        action='store_true', default=False)
    args = parser.parse_args()
    if args.version:
        print('dockermon %s' % __version__)
        raise SystemExit
    if args.prog:
        # Feed each event to the given program's stdin, one process per event.
        prog = shlex.split(args.prog)
        callback = partial(prog_callback, prog)
    else:
        # No program given: emit events as JSON lines on stdout.
        callback = print_callback
    try:
        watch(callback, args.socket_url)
    except (KeyboardInterrupt, EOFError):
        # Ctrl-C or daemon hang-up: exit quietly.
        pass
| true | true |
f7313f100d4294fe5183c05e8c3ad109ceb0c790 | 16,944 | py | Python | pandas/tests/indexes/ranges/test_range.py | mujtahidalam/pandas | 526468c8fe6fc5157aaf2fce327c5ab2a3350f49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2021-06-17T12:54:33.000Z | 2021-06-17T12:54:33.000Z | pandas/tests/indexes/ranges/test_range.py | mujtahidalam/pandas | 526468c8fe6fc5157aaf2fce327c5ab2a3350f49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/ranges/test_range.py | mujtahidalam/pandas | 526468c8fe6fc5157aaf2fce327c5ab2a3350f49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
from pandas.core.dtypes.common import ensure_platform_int
import pandas as pd
from pandas import (
Float64Index,
Index,
Int64Index,
RangeIndex,
)
import pandas._testing as tm
from pandas.tests.indexes.test_numeric import Numeric
# aliases to make some tests easier to read
RI = RangeIndex
I64 = Int64Index
F64 = Float64Index
OI = Index
class TestRangeIndex(Numeric):
_index_cls = RangeIndex
@pytest.fixture
def simple_index(self) -> Index:
return self._index_cls(start=0, stop=20, step=2)
@pytest.fixture(
params=[
RangeIndex(start=0, stop=20, step=2, name="foo"),
RangeIndex(start=18, stop=-1, step=-2, name="bar"),
],
ids=["index_inc", "index_dec"],
)
def index(self, request):
return request.param
def test_can_hold_identifiers(self, simple_index):
idx = simple_index
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_too_many_names(self, simple_index):
index = simple_index
with pytest.raises(ValueError, match="^Length"):
index.names = ["roger", "harold"]
@pytest.mark.parametrize(
"index, start, stop, step",
[
(RangeIndex(5), 0, 5, 1),
(RangeIndex(0, 5), 0, 5, 1),
(RangeIndex(5, step=2), 0, 5, 2),
(RangeIndex(1, 5, 2), 1, 5, 2),
],
)
def test_start_stop_step_attrs(self, index, start, stop, step):
# GH 25710
assert index.start == start
assert index.stop == stop
assert index.step == step
@pytest.mark.parametrize("attr_name", ["_start", "_stop", "_step"])
def test_deprecated_start_stop_step_attrs(self, attr_name, simple_index):
# GH 26581
idx = simple_index
with tm.assert_produces_warning(FutureWarning):
getattr(idx, attr_name)
def test_copy(self):
i = RangeIndex(5, name="Foo")
i_copy = i.copy()
assert i_copy is not i
assert i_copy.identical(i)
assert i_copy._range == range(0, 5, 1)
assert i_copy.name == "Foo"
def test_repr(self):
i = RangeIndex(5, name="Foo")
result = repr(i)
expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
i = RangeIndex(5, 0, -1)
result = repr(i)
expected = "RangeIndex(start=5, stop=0, step=-1)"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
def test_insert(self):
idx = RangeIndex(5, name="Foo")
result = idx[1:4]
# test 0th element
tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]))
# GH 18295 (test missing)
expected = Float64Index([0, np.nan, 1, 2, 3, 4])
for na in [np.nan, None, pd.NA]:
result = RangeIndex(5).insert(1, na)
tm.assert_index_equal(result, expected)
result = RangeIndex(5).insert(1, pd.NaT)
expected = Index([0, pd.NaT, 1, 2, 3, 4], dtype=object)
tm.assert_index_equal(result, expected)
def test_delete(self):
idx = RangeIndex(5, name="Foo")
expected = idx[1:].astype(int)
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
expected = idx[:-1].astype(int)
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises((IndexError, ValueError), match=msg):
# either depending on numpy version
result = idx.delete(len(idx))
def test_view(self):
i = RangeIndex(0, name="Foo")
i_view = i.view()
assert i_view.name == "Foo"
i_view = i.view("i8")
tm.assert_numpy_array_equal(i.values, i_view)
i_view = i.view(RangeIndex)
tm.assert_index_equal(i, i_view)
def test_dtype(self, simple_index):
index = simple_index
assert index.dtype == np.int64
def test_cache(self):
# GH 26565, GH26617, GH35432
# This test checks whether _cache has been set.
# Calling RangeIndex._cache["_data"] creates an int64 array of the same length
# as the RangeIndex and stores it in _cache.
idx = RangeIndex(0, 100, 10)
assert idx._cache == {}
repr(idx)
assert idx._cache == {}
str(idx)
assert idx._cache == {}
idx.get_loc(20)
assert idx._cache == {}
90 in idx # True
assert idx._cache == {}
91 in idx # False
assert idx._cache == {}
idx.all()
assert idx._cache == {}
idx.any()
assert idx._cache == {}
for _ in idx:
pass
assert idx._cache == {}
idx.format()
assert idx._cache == {}
df = pd.DataFrame({"a": range(10)}, index=idx)
str(df)
assert idx._cache == {}
df.loc[50]
assert idx._cache == {}
with pytest.raises(KeyError, match="51"):
df.loc[51]
assert idx._cache == {}
df.loc[10:50]
assert idx._cache == {}
df.iloc[5:10]
assert idx._cache == {}
# idx._cache should contain a _data entry after call to idx._data
idx._data
assert isinstance(idx._data, np.ndarray)
assert idx._data is idx._data # check cached value is reused
assert len(idx._cache) == 1
expected = np.arange(0, 100, 10, dtype="int64")
tm.assert_numpy_array_equal(idx._cache["_data"], expected)
def test_is_monotonic(self):
index = RangeIndex(0, 20, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is False
index = RangeIndex(4, 0, -1)
assert index.is_monotonic is False
assert index._is_strictly_monotonic_increasing is False
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(2, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
def test_equals_range(self):
equiv_pairs = [
(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
(RangeIndex(0), RangeIndex(1, -1, 3)),
(RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
(RangeIndex(0, -9, -2), RangeIndex(0, -10, -2)),
]
for left, right in equiv_pairs:
assert left.equals(right)
assert right.equals(left)
def test_logical_compat(self, simple_index):
idx = simple_index
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self, simple_index):
index = simple_index
i = Index(index.copy())
assert i.identical(index)
# we don't allow object dtype for RangeIndex
if isinstance(index, RangeIndex):
return
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = index.copy(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
assert same_values.identical(index.copy(dtype=object))
assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
assert not index.copy(dtype=object).identical(index.copy(dtype="int64"))
def test_nbytes(self):
# memory savings vs int index
i = RangeIndex(0, 1000)
assert i.nbytes < i._int64index.nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
assert i.nbytes == i2.nbytes
@pytest.mark.parametrize(
"start,stop,step",
[
# can't
("foo", "bar", "baz"),
# shouldn't
("0", "1", "2"),
],
)
def test_cant_or_shouldnt_cast(self, start, stop, step):
msg = f"Wrong type {type(start)} for value {start}"
with pytest.raises(TypeError, match=msg):
RangeIndex(start, stop, step)
def test_view_index(self, simple_index):
index = simple_index
index.view(Index)
def test_prevent_casting(self, simple_index):
index = simple_index
result = index.astype("O")
assert result.dtype == np.object_
def test_repr_roundtrip(self, simple_index):
index = simple_index
tm.assert_index_equal(eval(repr(index)), index)
def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name="asdf")
assert idx.name == idx[1:].name
def test_has_duplicates(self, index):
assert index.is_unique
assert not index.has_duplicates
def test_extended_gcd(self, simple_index):
index = simple_index
result = index._extended_gcd(6, 10)
assert result[0] == result[1] * 6 + result[2] * 10
assert 2 == result[0]
result = index._extended_gcd(10, 6)
assert 2 == result[1] * 10 + result[2] * 6
assert 2 == result[0]
def test_min_fitting_element(self):
result = RangeIndex(0, 20, 2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(1, 6)._min_fitting_element(1)
assert 1 == result
result = RangeIndex(18, -2, -2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(5, 0, -1)._min_fitting_element(1)
assert 1 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num)
assert big_num == result
def test_max_fitting_element(self):
result = RangeIndex(0, 20, 2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(1, 6)._max_fitting_element(4)
assert 4 == result
result = RangeIndex(18, -2, -2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(5, 0, -1)._max_fitting_element(4)
assert 4 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._max_fitting_element(big_num)
assert big_num == result
def test_pickle_compat_construction(self):
# RangeIndex() is a valid constructor
pass
def test_slice_specialised(self, simple_index):
index = simple_index
index.name = "foo"
# scalar indexing
res = index[1]
expected = 2
assert res == expected
res = index[-1]
expected = 18
assert res == expected
# slicing
# slice value completion
index_slice = index[:]
expected = index
tm.assert_index_equal(index_slice, expected)
# positive slice values
index_slice = index[7:10:2]
expected = Index(np.array([14, 18]), name="foo")
tm.assert_index_equal(index_slice, expected)
# negative slice values
index_slice = index[-1:-5:-2]
expected = Index(np.array([18, 14]), name="foo")
tm.assert_index_equal(index_slice, expected)
# stop overshoot
index_slice = index[2:100:4]
expected = Index(np.array([4, 12]), name="foo")
tm.assert_index_equal(index_slice, expected)
# reverse
index_slice = index[::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-8::-1]
expected = Index(np.array([4, 2, 0]), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[40::-1]
expected = Index(index.values[40::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[10::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
@pytest.mark.parametrize("step", set(range(-5, 6)) - {0})
def test_len_specialised(self, step):
# make sure that our len is the same as np.arange calc
start, stop = (0, 5) if step > 0 else (5, 0)
arr = np.arange(start, stop, step)
index = RangeIndex(start, stop, step)
assert len(index) == len(arr)
index = RangeIndex(stop, start, step)
assert len(index) == 0
@pytest.fixture(
params=[
([RI(1, 12, 5)], RI(1, 12, 5)),
([RI(0, 6, 4)], RI(0, 6, 4)),
([RI(1, 3), RI(3, 7)], RI(1, 7)),
([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)),
([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)),
([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)),
([RI(-4, -8), RI(-8, -12)], RI(0, 0)),
([RI(-4, -8), RI(3, -4)], RI(0, 0)),
([RI(-4, -8), RI(3, 5)], RI(3, 5)),
([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])),
([RI(-2), RI(3, 5)], RI(3, 5)),
([RI(2), RI(2)], I64([0, 1, 0, 1])),
([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)),
([RI(2), RI(3, 5), RI(5, 8, 4)], I64([0, 1, 3, 4, 5])),
([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)),
([RI(3), I64([-1, 3, 15])], I64([0, 1, 2, -1, 3, 15])),
([RI(3), F64([-1, 3.1, 15.0])], F64([0, 1, 2, -1, 3.1, 15.0])),
([RI(3), OI(["a", None, 14])], OI([0, 1, 2, "a", None, 14])),
([RI(3, 1), OI(["a", None, 14])], OI(["a", None, 14])),
]
)
def appends(self, request):
"""Inputs and expected outputs for RangeIndex.append test"""
return request.param
def test_append(self, appends):
# GH16212
indices, expected = appends
result = indices[0].append(indices[1:])
tm.assert_index_equal(result, expected, exact=True)
if len(indices) == 2:
# Append single item rather than list
result2 = indices[0].append(indices[1])
tm.assert_index_equal(result2, expected, exact=True)
def test_engineless_lookup(self):
# GH 16685
# Standard lookup on RangeIndex should not require the engine to be
# created
idx = RangeIndex(2, 10, 3)
assert idx.get_loc(5) == 1
tm.assert_numpy_array_equal(
idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2]))
)
with pytest.raises(KeyError, match="3"):
idx.get_loc(3)
assert "_engine" not in idx._cache
# Different types of scalars can be excluded immediately, no need to
# use the _engine
with pytest.raises(KeyError, match="'a'"):
idx.get_loc("a")
assert "_engine" not in idx._cache
def test_format_empty(self):
# GH35712
empty_idx = self._index_cls(0)
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
@pytest.mark.parametrize(
"RI",
[
RangeIndex(0, -1, -1),
RangeIndex(0, 1, 1),
RangeIndex(1, 3, 2),
RangeIndex(0, -1, -2),
RangeIndex(-3, -5, -2),
],
)
def test_append_len_one(self, RI):
# GH39401
result = RI.append([])
tm.assert_index_equal(result, RI, exact=True)
@pytest.mark.parametrize("base", [RangeIndex(0, 2), Index([0, 1])])
def test_isin_range(self, base):
# GH#41151
values = RangeIndex(0, 1)
result = base.isin(values)
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
| 31.61194 | 86 | 0.578435 | import numpy as np
import pytest
from pandas.core.dtypes.common import ensure_platform_int
import pandas as pd
from pandas import (
Float64Index,
Index,
Int64Index,
RangeIndex,
)
import pandas._testing as tm
from pandas.tests.indexes.test_numeric import Numeric
RI = RangeIndex
I64 = Int64Index
F64 = Float64Index
OI = Index
class TestRangeIndex(Numeric):
_index_cls = RangeIndex
@pytest.fixture
def simple_index(self) -> Index:
return self._index_cls(start=0, stop=20, step=2)
@pytest.fixture(
params=[
RangeIndex(start=0, stop=20, step=2, name="foo"),
RangeIndex(start=18, stop=-1, step=-2, name="bar"),
],
ids=["index_inc", "index_dec"],
)
def index(self, request):
return request.param
def test_can_hold_identifiers(self, simple_index):
idx = simple_index
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is False
def test_too_many_names(self, simple_index):
index = simple_index
with pytest.raises(ValueError, match="^Length"):
index.names = ["roger", "harold"]
@pytest.mark.parametrize(
"index, start, stop, step",
[
(RangeIndex(5), 0, 5, 1),
(RangeIndex(0, 5), 0, 5, 1),
(RangeIndex(5, step=2), 0, 5, 2),
(RangeIndex(1, 5, 2), 1, 5, 2),
],
)
def test_start_stop_step_attrs(self, index, start, stop, step):
assert index.start == start
assert index.stop == stop
assert index.step == step
@pytest.mark.parametrize("attr_name", ["_start", "_stop", "_step"])
def test_deprecated_start_stop_step_attrs(self, attr_name, simple_index):
idx = simple_index
with tm.assert_produces_warning(FutureWarning):
getattr(idx, attr_name)
def test_copy(self):
i = RangeIndex(5, name="Foo")
i_copy = i.copy()
assert i_copy is not i
assert i_copy.identical(i)
assert i_copy._range == range(0, 5, 1)
assert i_copy.name == "Foo"
def test_repr(self):
i = RangeIndex(5, name="Foo")
result = repr(i)
expected = "RangeIndex(start=0, stop=5, step=1, name='Foo')"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
i = RangeIndex(5, 0, -1)
result = repr(i)
expected = "RangeIndex(start=5, stop=0, step=-1)"
assert result == expected
result = eval(result)
tm.assert_index_equal(result, i, exact=True)
def test_insert(self):
idx = RangeIndex(5, name="Foo")
result = idx[1:4]
tm.assert_index_equal(idx[0:4], result.insert(0, idx[0]))
expected = Float64Index([0, np.nan, 1, 2, 3, 4])
for na in [np.nan, None, pd.NA]:
result = RangeIndex(5).insert(1, na)
tm.assert_index_equal(result, expected)
result = RangeIndex(5).insert(1, pd.NaT)
expected = Index([0, pd.NaT, 1, 2, 3, 4], dtype=object)
tm.assert_index_equal(result, expected)
def test_delete(self):
idx = RangeIndex(5, name="Foo")
expected = idx[1:].astype(int)
result = idx.delete(0)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
expected = idx[:-1].astype(int)
result = idx.delete(-1)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
msg = "index 5 is out of bounds for axis 0 with size 5"
with pytest.raises((IndexError, ValueError), match=msg):
result = idx.delete(len(idx))
def test_view(self):
i = RangeIndex(0, name="Foo")
i_view = i.view()
assert i_view.name == "Foo"
i_view = i.view("i8")
tm.assert_numpy_array_equal(i.values, i_view)
i_view = i.view(RangeIndex)
tm.assert_index_equal(i, i_view)
def test_dtype(self, simple_index):
index = simple_index
assert index.dtype == np.int64
def test_cache(self):
idx = RangeIndex(0, 100, 10)
assert idx._cache == {}
repr(idx)
assert idx._cache == {}
str(idx)
assert idx._cache == {}
idx.get_loc(20)
assert idx._cache == {}
90 in idx
assert idx._cache == {}
91 in idx
assert idx._cache == {}
idx.all()
assert idx._cache == {}
idx.any()
assert idx._cache == {}
for _ in idx:
pass
assert idx._cache == {}
idx.format()
assert idx._cache == {}
df = pd.DataFrame({"a": range(10)}, index=idx)
str(df)
assert idx._cache == {}
df.loc[50]
assert idx._cache == {}
with pytest.raises(KeyError, match="51"):
df.loc[51]
assert idx._cache == {}
df.loc[10:50]
assert idx._cache == {}
df.iloc[5:10]
assert idx._cache == {}
idx._data
assert isinstance(idx._data, np.ndarray)
assert idx._data is idx._data
assert len(idx._cache) == 1
expected = np.arange(0, 100, 10, dtype="int64")
tm.assert_numpy_array_equal(idx._cache["_data"], expected)
def test_is_monotonic(self):
index = RangeIndex(0, 20, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is False
index = RangeIndex(4, 0, -1)
assert index.is_monotonic is False
assert index._is_strictly_monotonic_increasing is False
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 2)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(2, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
index = RangeIndex(1, 1)
assert index.is_monotonic is True
assert index.is_monotonic_increasing is True
assert index.is_monotonic_decreasing is True
assert index._is_strictly_monotonic_increasing is True
assert index._is_strictly_monotonic_decreasing is True
def test_equals_range(self):
equiv_pairs = [
(RangeIndex(0, 9, 2), RangeIndex(0, 10, 2)),
(RangeIndex(0), RangeIndex(1, -1, 3)),
(RangeIndex(1, 2, 3), RangeIndex(1, 3, 4)),
(RangeIndex(0, -9, -2), RangeIndex(0, -10, -2)),
]
for left, right in equiv_pairs:
assert left.equals(right)
assert right.equals(left)
def test_logical_compat(self, simple_index):
idx = simple_index
assert idx.all() == idx.values.all()
assert idx.any() == idx.values.any()
def test_identical(self, simple_index):
index = simple_index
i = Index(index.copy())
assert i.identical(index)
if isinstance(index, RangeIndex):
return
same_values_different_type = Index(i, dtype=object)
assert not i.identical(same_values_different_type)
i = index.copy(dtype=object)
i = i.rename("foo")
same_values = Index(i, dtype=object)
assert same_values.identical(index.copy(dtype=object))
assert not i.identical(index)
assert Index(same_values, name="foo", dtype=object).identical(i)
assert not index.copy(dtype=object).identical(index.copy(dtype="int64"))
def test_nbytes(self):
# memory savings vs int index
i = RangeIndex(0, 1000)
assert i.nbytes < i._int64index.nbytes / 10
# constant memory usage
i2 = RangeIndex(0, 10)
assert i.nbytes == i2.nbytes
@pytest.mark.parametrize(
"start,stop,step",
[
# can't
("foo", "bar", "baz"),
("0", "1", "2"),
],
)
def test_cant_or_shouldnt_cast(self, start, stop, step):
msg = f"Wrong type {type(start)} for value {start}"
with pytest.raises(TypeError, match=msg):
RangeIndex(start, stop, step)
def test_view_index(self, simple_index):
index = simple_index
index.view(Index)
def test_prevent_casting(self, simple_index):
index = simple_index
result = index.astype("O")
assert result.dtype == np.object_
def test_repr_roundtrip(self, simple_index):
index = simple_index
tm.assert_index_equal(eval(repr(index)), index)
def test_slice_keep_name(self):
idx = RangeIndex(1, 2, name="asdf")
assert idx.name == idx[1:].name
def test_has_duplicates(self, index):
assert index.is_unique
assert not index.has_duplicates
def test_extended_gcd(self, simple_index):
index = simple_index
result = index._extended_gcd(6, 10)
assert result[0] == result[1] * 6 + result[2] * 10
assert 2 == result[0]
result = index._extended_gcd(10, 6)
assert 2 == result[1] * 10 + result[2] * 6
assert 2 == result[0]
def test_min_fitting_element(self):
result = RangeIndex(0, 20, 2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(1, 6)._min_fitting_element(1)
assert 1 == result
result = RangeIndex(18, -2, -2)._min_fitting_element(1)
assert 2 == result
result = RangeIndex(5, 0, -1)._min_fitting_element(1)
assert 1 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._min_fitting_element(big_num)
assert big_num == result
def test_max_fitting_element(self):
result = RangeIndex(0, 20, 2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(1, 6)._max_fitting_element(4)
assert 4 == result
result = RangeIndex(18, -2, -2)._max_fitting_element(17)
assert 16 == result
result = RangeIndex(5, 0, -1)._max_fitting_element(4)
assert 4 == result
big_num = 500000000000000000000000
result = RangeIndex(5, big_num * 2, 1)._max_fitting_element(big_num)
assert big_num == result
def test_pickle_compat_construction(self):
# RangeIndex() is a valid constructor
pass
def test_slice_specialised(self, simple_index):
index = simple_index
index.name = "foo"
# scalar indexing
res = index[1]
expected = 2
assert res == expected
res = index[-1]
expected = 18
assert res == expected
# slicing
# slice value completion
index_slice = index[:]
expected = index
tm.assert_index_equal(index_slice, expected)
# positive slice values
index_slice = index[7:10:2]
expected = Index(np.array([14, 18]), name="foo")
tm.assert_index_equal(index_slice, expected)
# negative slice values
index_slice = index[-1:-5:-2]
expected = Index(np.array([18, 14]), name="foo")
tm.assert_index_equal(index_slice, expected)
# stop overshoot
index_slice = index[2:100:4]
expected = Index(np.array([4, 12]), name="foo")
tm.assert_index_equal(index_slice, expected)
# reverse
index_slice = index[::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-8::-1]
expected = Index(np.array([4, 2, 0]), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[-40::-1]
expected = Index(np.array([], dtype=np.int64), name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[40::-1]
expected = Index(index.values[40::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
index_slice = index[10::-1]
expected = Index(index.values[::-1], name="foo")
tm.assert_index_equal(index_slice, expected)
@pytest.mark.parametrize("step", set(range(-5, 6)) - {0})
def test_len_specialised(self, step):
# make sure that our len is the same as np.arange calc
start, stop = (0, 5) if step > 0 else (5, 0)
arr = np.arange(start, stop, step)
index = RangeIndex(start, stop, step)
assert len(index) == len(arr)
index = RangeIndex(stop, start, step)
assert len(index) == 0
@pytest.fixture(
params=[
([RI(1, 12, 5)], RI(1, 12, 5)),
([RI(0, 6, 4)], RI(0, 6, 4)),
([RI(1, 3), RI(3, 7)], RI(1, 7)),
([RI(1, 5, 2), RI(5, 6)], RI(1, 6, 2)),
([RI(1, 3, 2), RI(4, 7, 3)], RI(1, 7, 3)),
([RI(-4, 3, 2), RI(4, 7, 2)], RI(-4, 7, 2)),
([RI(-4, -8), RI(-8, -12)], RI(0, 0)),
([RI(-4, -8), RI(3, -4)], RI(0, 0)),
([RI(-4, -8), RI(3, 5)], RI(3, 5)),
([RI(-4, -2), RI(3, 5)], I64([-4, -3, 3, 4])),
([RI(-2), RI(3, 5)], RI(3, 5)),
([RI(2), RI(2)], I64([0, 1, 0, 1])),
([RI(2), RI(2, 5), RI(5, 8, 4)], RI(0, 6)),
([RI(2), RI(3, 5), RI(5, 8, 4)], I64([0, 1, 3, 4, 5])),
([RI(-2, 2), RI(2, 5), RI(5, 8, 4)], RI(-2, 6)),
([RI(3), I64([-1, 3, 15])], I64([0, 1, 2, -1, 3, 15])),
([RI(3), F64([-1, 3.1, 15.0])], F64([0, 1, 2, -1, 3.1, 15.0])),
([RI(3), OI(["a", None, 14])], OI([0, 1, 2, "a", None, 14])),
([RI(3, 1), OI(["a", None, 14])], OI(["a", None, 14])),
]
)
def appends(self, request):
return request.param
def test_append(self, appends):
# GH16212
indices, expected = appends
result = indices[0].append(indices[1:])
tm.assert_index_equal(result, expected, exact=True)
if len(indices) == 2:
# Append single item rather than list
result2 = indices[0].append(indices[1])
tm.assert_index_equal(result2, expected, exact=True)
def test_engineless_lookup(self):
# GH 16685
# Standard lookup on RangeIndex should not require the engine to be
# created
idx = RangeIndex(2, 10, 3)
assert idx.get_loc(5) == 1
tm.assert_numpy_array_equal(
idx.get_indexer([2, 8]), ensure_platform_int(np.array([0, 2]))
)
with pytest.raises(KeyError, match="3"):
idx.get_loc(3)
assert "_engine" not in idx._cache
# Different types of scalars can be excluded immediately, no need to
# use the _engine
with pytest.raises(KeyError, match="'a'"):
idx.get_loc("a")
assert "_engine" not in idx._cache
def test_format_empty(self):
# GH35712
empty_idx = self._index_cls(0)
assert empty_idx.format() == []
assert empty_idx.format(name=True) == [""]
@pytest.mark.parametrize(
"RI",
[
RangeIndex(0, -1, -1),
RangeIndex(0, 1, 1),
RangeIndex(1, 3, 2),
RangeIndex(0, -1, -2),
RangeIndex(-3, -5, -2),
],
)
def test_append_len_one(self, RI):
# GH39401
result = RI.append([])
tm.assert_index_equal(result, RI, exact=True)
@pytest.mark.parametrize("base", [RangeIndex(0, 2), Index([0, 1])])
def test_isin_range(self, base):
# GH#41151
values = RangeIndex(0, 1)
result = base.isin(values)
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
| true | true |
f7313ff228df6f15c217111d85289fbb96c16a6e | 7,086 | py | Python | rpython/translator/backendopt/test/test_merge_if_blocks.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 381 | 2018-08-18T03:37:22.000Z | 2022-02-06T23:57:36.000Z | rpython/translator/backendopt/test/test_merge_if_blocks.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 16 | 2018-09-22T18:12:47.000Z | 2022-02-22T20:03:59.000Z | rpython/translator/backendopt/test/test_merge_if_blocks.py | nanjekyejoannah/pypy | e80079fe13c29eda7b2a6b4cd4557051f975a2d9 | [
"Apache-2.0",
"OpenSSL"
] | 55 | 2015-08-16T02:41:30.000Z | 2022-03-20T20:33:35.000Z | from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks_once
from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks
from rpython.translator.backendopt.all import backend_optimizations
from rpython.translator.translator import TranslationContext, graphof as tgraphof
from rpython.flowspace.model import Block, checkgraph
from rpython.translator.backendopt.removenoops import remove_same_as
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int
from rpython.annotator.model import SomeChar, SomeUnicodeCodePoint
from rpython.rlib.objectmodel import CDefinedIntSymbolic
def do_test_merge(fn, testvalues):
    """Rtype *fn*, merge its if-chain into one switch, and check semantics.

    Expects *fn* to consist of exactly three chained equality tests, so the
    rtyped graph has 4 blocks and, after one merge pass, collapses to a
    single startblock with 4 exits plus the returnblock.  The merged graph
    is then interpreted on every value in *testvalues* and compared against
    calling *fn* directly.
    """
    t = TranslationContext()
    a = t.buildannotator()
    a.build_types(fn, [type(testvalues[0])])
    rtyper = t.buildrtyper()
    rtyper.specialize()
    graph = tgraphof(t, fn)
    assert len(list(graph.iterblocks())) == 4 #startblock, blocks, returnblock
    remove_same_as(graph)
    merge_if_blocks_once(graph)
    assert len(graph.startblock.exits) == 4
    assert len(list(graph.iterblocks())) == 2 #startblock, returnblock
    interp = LLInterpreter(rtyper)
    for i in testvalues:
        # the merged graph must behave exactly like the plain Python function
        expected = fn(i)
        actual = interp.eval_graph(graph, [i])
        assert actual == expected
def test_merge1():
    """Equality chains on int, uint, char and unichar each merge to a switch."""
    def merge_int(n):
        n += 1
        if n == 1:
            return 1
        elif n == 2:
            return 2
        elif n == 3:
            return 3
        return 4
    do_test_merge(merge_int, range(4))
    do_test_merge(merge_int, [r_uint(i) for i in range(4)])
    # the (r_)longlong variants have been disabled:
    #if r_longlong is not r_int:
    #    do_test_merge(merge_int, [r_longlong(i) for i in range(4)])
    #do_test_merge(merge_int, [r_ulonglong(i) for i in range(4)])
    def merge_chr(n):
        c = chr(n + 1)
        if c == 'a':
            return 'a'
        elif c == 'b':
            return 'b'
        elif c == 'c':
            return 'c'
        return 'd'
    do_test_merge(merge_chr, range(96, 101))
    def merge_uchr(n):
        c = unichr(n + 1)
        if c == u'a':
            return u'a'
        elif c == u'b':
            return u'b'
        elif c == u'c':
            return u'c'
        return u'd'
    do_test_merge(merge_uchr, range(96, 101))
def test_merge_passonvars():
    """Merging still works when an extra variable is passed through each branch."""
    def merge(n, m):
        if n == 1:
            return m + 1
        elif n == 2:
            return m + 2
        elif n == 3:
            return m + 3
        return m + 4
    t = TranslationContext()
    a = t.buildannotator()
    a.build_types(merge, [int, int])
    rtyper = t.buildrtyper()
    rtyper.specialize()
    graph = tgraphof(t, merge)
    # each branch computes m + k in its own block, hence 8 blocks initially
    assert len(list(graph.iterblocks())) == 8
    remove_same_as(graph)
    merge_if_blocks_once(graph)
    # the chained comparisons collapse into one 4-way switch
    assert len(graph.startblock.exits) == 4
    interp = LLInterpreter(rtyper)
    for i in range(1, 5):
        res = interp.eval_graph(graph, [i, 1])
        assert res == i + 1
def test_merge_several():
    """Nested chains merge independently: the inner m-chain and outer n-chain
    each become a switch, leaving 3 blocks and 3 start-block exits."""
    def merge(n, m):
        r = -1
        if n == 0:
            if m == 0:
                r = 0
            elif m == 1:
                r = 1
            else:
                r = 2
        elif n == 1:
            r = 4
        else:
            r = 6
        return r
    t = TranslationContext()
    a = t.buildannotator()
    a.build_types(merge, [int, int])
    rtyper = t.buildrtyper()
    rtyper.specialize()
    graph = tgraphof(t, merge)
    remove_same_as(graph)
    merge_if_blocks(graph)
    assert len(graph.startblock.exits) == 3
    assert len(list(graph.iterblocks())) == 3
    interp = LLInterpreter(rtyper)
    for m in range(3):
        res = interp.eval_graph(graph, [0, m])
        assert res == m
    res = interp.eval_graph(graph, [1, 0])
    assert res == 4
    res = interp.eval_graph(graph, [2, 0])
    assert res == 6
def test_merge_with_or():
    """A short-circuit `or` of two equality tests still merges correctly."""
    def merge(n):
        if n == 5:
            return 4
        elif n == 14 or n == 2:
            return 16
        else:
            return 7
    do_test_merge(merge, [5, 6, 14, 2, 3, 123])
def test_dont_merge():
    """Merging must NOT happen when real work sits between the comparisons:
    the block count is asserted to stay unchanged."""
    def merge(n, m):
        r = -1
        if n == 0:
            r += m
        if n == 1:
            r += 2 * m
        else:
            r += 6
        return r
    t = TranslationContext()
    a = t.buildannotator()
    a.build_types(merge, [int, int])
    rtyper = t.buildrtyper()
    rtyper.specialize()
    graph = tgraphof(t, merge)
    remove_same_as(graph)
    blocknum = len(list(graph.iterblocks()))
    merge_if_blocks(graph)
    # graph shape untouched: nothing was eligible for merging
    assert blocknum == len(list(graph.iterblocks()))
def test_two_constants():
    """After full backend optimizations, a graph with only constant data has
    nothing left to merge; a second merge pass must be a no-op."""
    def fn():
        r = range(10, 37, 4)
        r.reverse()
        return r[0]
    t = TranslationContext()
    a = t.buildannotator()
    a.build_types(fn, [])
    rtyper = t.buildrtyper()
    rtyper.specialize()
    backend_optimizations(t, merge_if_blocks=True)
    graph = tgraphof(t, fn)
    blocknum = len(list(graph.iterblocks()))
    merge_if_blocks(graph)
    assert blocknum == len(list(graph.iterblocks()))
def test_same_cases():
    """Duplicate case values (x == 42 twice) must not produce duplicate switch
    exits; the unreachable second branch is dropped, leaving 2 exits."""
    def fn(x):
        if x == 42:
            r = 1
        elif x == 42:
            r = 2
        else:
            r = 3
        return r
    t = TranslationContext()
    a = t.buildannotator()
    a.build_types(fn, [int])
    rtyper = t.buildrtyper()
    rtyper.specialize()
    backend_optimizations(t, merge_if_blocks=True)
    graph = tgraphof(t, fn)
    assert len(graph.startblock.exits) == 2
    interp = LLInterpreter(rtyper)
    for i in [42, 43]:
        expected = fn(i)
        actual = interp.eval_graph(graph, [i])
        assert actual == expected
def test_replace_exitswitch_by_constant_bug():
    """Regression test: after inlining and malloc removal fold the exitswitch
    variable into a constant, join_blocks must not crash. Only checks that the
    whole pipeline runs; there are no value assertions."""
    class X:
        pass
    def constant9():
        x = X()
        x.n = 3
        x.n = 9
        return x.n
    def fn():
        n = constant9()
        if n == 1: return 5
        elif n == 2: return 6
        elif n == 3: return 8
        elif n == 4: return -123
        elif n == 5: return 12973
        else: return n
    t = TranslationContext()
    a = t.buildannotator()
    a.build_types(fn, [])
    rtyper = t.buildrtyper()
    rtyper.specialize()
    graph = t.graphs[0]
    remove_same_as(graph)
    merge_if_blocks_once(graph)
    from rpython.translator.backendopt import malloc, inline
    inline.auto_inlining(t, 20)
    malloc.remove_mallocs(t, t.graphs)
    from rpython.translator import simplify
    simplify.join_blocks(graph)
def test_switch_on_symbolic():
    """Comparisons against CDefinedIntSymbolic values (resolved only at
    C-compile time) must not be merged into a switch: the merge pass is
    asserted to report no change and the graph must stay well-formed."""
    symb1 = CDefinedIntSymbolic("1", 1)
    symb2 = CDefinedIntSymbolic("2", 2)
    symb3 = CDefinedIntSymbolic("3", 3)
    def fn(x):
        res = 0
        if x == symb1:
            res += x + 1
        elif x == symb2:
            res += x + 2
        elif x == symb3:
            res += x + 3
        res += 1
        return res
    t = TranslationContext()
    a = t.buildannotator()
    a.build_types(fn, [int])
    rtyper = t.buildrtyper()
    rtyper.specialize()
    graph = t.graphs[0]
    remove_same_as(graph)
    res = merge_if_blocks_once(graph)
    assert not res
    checkgraph(graph)
| 27.788235 | 81 | 0.576348 | from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks_once
from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks
from rpython.translator.backendopt.all import backend_optimizations
from rpython.translator.translator import TranslationContext, graphof as tgraphof
from rpython.flowspace.model import Block, checkgraph
from rpython.translator.backendopt.removenoops import remove_same_as
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int
from rpython.annotator.model import SomeChar, SomeUnicodeCodePoint
from rpython.rlib.objectmodel import CDefinedIntSymbolic
def do_test_merge(fn, testvalues):
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [type(testvalues[0])])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = tgraphof(t, fn)
assert len(list(graph.iterblocks())) == 4
remove_same_as(graph)
merge_if_blocks_once(graph)
assert len(graph.startblock.exits) == 4
assert len(list(graph.iterblocks())) == 2
interp = LLInterpreter(rtyper)
for i in testvalues:
expected = fn(i)
actual = interp.eval_graph(graph, [i])
assert actual == expected
def test_merge1():
def merge_int(n):
n += 1
if n == 1:
return 1
elif n == 2:
return 2
elif n == 3:
return 3
return 4
do_test_merge(merge_int, range(4))
do_test_merge(merge_int, [r_uint(i) for i in range(4)])
def merge_chr(n):
c = chr(n + 1)
if c == 'a':
return 'a'
elif c == 'b':
return 'b'
elif c == 'c':
return 'c'
return 'd'
do_test_merge(merge_chr, range(96, 101))
def merge_uchr(n):
c = unichr(n + 1)
if c == u'a':
return u'a'
elif c == u'b':
return u'b'
elif c == u'c':
return u'c'
return u'd'
do_test_merge(merge_uchr, range(96, 101))
def test_merge_passonvars():
def merge(n, m):
if n == 1:
return m + 1
elif n == 2:
return m + 2
elif n == 3:
return m + 3
return m + 4
t = TranslationContext()
a = t.buildannotator()
a.build_types(merge, [int, int])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = tgraphof(t, merge)
assert len(list(graph.iterblocks())) == 8
remove_same_as(graph)
merge_if_blocks_once(graph)
assert len(graph.startblock.exits) == 4
interp = LLInterpreter(rtyper)
for i in range(1, 5):
res = interp.eval_graph(graph, [i, 1])
assert res == i + 1
def test_merge_several():
def merge(n, m):
r = -1
if n == 0:
if m == 0:
r = 0
elif m == 1:
r = 1
else:
r = 2
elif n == 1:
r = 4
else:
r = 6
return r
t = TranslationContext()
a = t.buildannotator()
a.build_types(merge, [int, int])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = tgraphof(t, merge)
remove_same_as(graph)
merge_if_blocks(graph)
assert len(graph.startblock.exits) == 3
assert len(list(graph.iterblocks())) == 3
interp = LLInterpreter(rtyper)
for m in range(3):
res = interp.eval_graph(graph, [0, m])
assert res == m
res = interp.eval_graph(graph, [1, 0])
assert res == 4
res = interp.eval_graph(graph, [2, 0])
assert res == 6
def test_merge_with_or():
def merge(n):
if n == 5:
return 4
elif n == 14 or n == 2:
return 16
else:
return 7
do_test_merge(merge, [5, 6, 14, 2, 3, 123])
def test_dont_merge():
def merge(n, m):
r = -1
if n == 0:
r += m
if n == 1:
r += 2 * m
else:
r += 6
return r
t = TranslationContext()
a = t.buildannotator()
a.build_types(merge, [int, int])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = tgraphof(t, merge)
remove_same_as(graph)
blocknum = len(list(graph.iterblocks()))
merge_if_blocks(graph)
assert blocknum == len(list(graph.iterblocks()))
def test_two_constants():
def fn():
r = range(10, 37, 4)
r.reverse()
return r[0]
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [])
rtyper = t.buildrtyper()
rtyper.specialize()
backend_optimizations(t, merge_if_blocks=True)
graph = tgraphof(t, fn)
blocknum = len(list(graph.iterblocks()))
merge_if_blocks(graph)
assert blocknum == len(list(graph.iterblocks()))
def test_same_cases():
def fn(x):
if x == 42:
r = 1
elif x == 42:
r = 2
else:
r = 3
return r
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [int])
rtyper = t.buildrtyper()
rtyper.specialize()
backend_optimizations(t, merge_if_blocks=True)
graph = tgraphof(t, fn)
assert len(graph.startblock.exits) == 2
interp = LLInterpreter(rtyper)
for i in [42, 43]:
expected = fn(i)
actual = interp.eval_graph(graph, [i])
assert actual == expected
def test_replace_exitswitch_by_constant_bug():
class X:
pass
def constant9():
x = X()
x.n = 3
x.n = 9
return x.n
def fn():
n = constant9()
if n == 1: return 5
elif n == 2: return 6
elif n == 3: return 8
elif n == 4: return -123
elif n == 5: return 12973
else: return n
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = t.graphs[0]
remove_same_as(graph)
merge_if_blocks_once(graph)
from rpython.translator.backendopt import malloc, inline
inline.auto_inlining(t, 20)
malloc.remove_mallocs(t, t.graphs)
from rpython.translator import simplify
simplify.join_blocks(graph)
def test_switch_on_symbolic():
symb1 = CDefinedIntSymbolic("1", 1)
symb2 = CDefinedIntSymbolic("2", 2)
symb3 = CDefinedIntSymbolic("3", 3)
def fn(x):
res = 0
if x == symb1:
res += x + 1
elif x == symb2:
res += x + 2
elif x == symb3:
res += x + 3
res += 1
return res
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [int])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = t.graphs[0]
remove_same_as(graph)
res = merge_if_blocks_once(graph)
assert not res
checkgraph(graph)
| true | true |
f7313ffd9372f0396f475d8a1a68661916b500ec | 796 | py | Python | leetcode/Algorithms/107.BinaryTreeLevelOrderTraversalII/Solution.py | liupangzi/codekata | 079373707601198f79fb6215b876a4cbcab32ee9 | [
"MIT"
] | 58 | 2017-04-30T12:59:37.000Z | 2020-08-05T14:23:57.000Z | leetcode/Algorithms/107.BinaryTreeLevelOrderTraversalII/Solution.py | liupangzi/codekata | 079373707601198f79fb6215b876a4cbcab32ee9 | [
"MIT"
] | null | null | null | leetcode/Algorithms/107.BinaryTreeLevelOrderTraversalII/Solution.py | liupangzi/codekata | 079373707601198f79fb6215b876a4cbcab32ee9 | [
"MIT"
] | 6 | 2018-01-20T18:35:09.000Z | 2020-07-22T14:20:27.000Z | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """LeetCode 107: bottom-up level-order traversal of a binary tree."""

    def levelOrderBottom(self, root):
        """Return node values grouped by depth, deepest level first.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if root is None:
            return []
        levels = [[]]
        self.helper(levels, root, 0)
        return levels[::-1]

    def helper(self, result, root, level):
        """Pre-order walk that appends each value to its depth bucket."""
        if level < len(result):
            result[level].append(root.val)
        else:
            # first node seen at this depth: open a new bucket
            result.append([root.val])
        for child in (root.left, root.right):
            if child:
                self.helper(result, child, level + 1)
| 24.121212 | 54 | 0.520101 |
class Solution(object):
    """LeetCode 107: binary tree level-order traversal, bottom-up."""
    def levelOrderBottom(self, root):
        """Return node values grouped by depth, deepest level first.

        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        result = [[]]
        self.helper(result, root, 0)
        result.reverse()  # collected top-down; deepest level must come first
        return result
    def helper(self, result, root, level):
        """Depth-first walk appending each node's value to its level bucket."""
        if len(result) == level:
            # first node seen at this depth: start a new bucket
            result.append([root.val])
        else:
            result[level].append(root.val)
        if root.left:
            self.helper(result, root.left, level + 1)
        if root.right:
            self.helper(result, root.right, level + 1)
| true | true |
f7314057a11eff7d520a32457c96935f97b70271 | 24,185 | py | Python | backend/lib/sites/facebook.py | kavish-p/youtube-search-dashboard | 2810b0d098699f11086868f9d754e0cb6194d6ff | [
"MIT"
] | 1 | 2021-03-26T05:19:48.000Z | 2021-03-26T05:19:48.000Z | chat_downloader/sites/facebook.py | lbmaian/chat-replay-downloader | 0f1b1326eec9fb45031d7fd58e0a4a9dd2297d5d | [
"MIT"
] | null | null | null | chat_downloader/sites/facebook.py | lbmaian/chat-replay-downloader | 0f1b1326eec9fb45031d7fd58e0a4a9dd2297d5d | [
"MIT"
] | null | null | null | import json
from json.decoder import JSONDecodeError
import xml.etree.ElementTree as ET
import isodate
import re
from .common import (
Chat,
BaseChatDownloader,
Remapper as r
)
from requests.exceptions import RequestException
from ..utils import (
remove_prefixes,
multi_get,
try_get_first_value,
try_get,
seconds_to_time,
camel_case_split,
ensure_seconds,
attempts,
get_title_of_webpage,
log
)
class FacebookChatDownloader(BaseChatDownloader):
_FB_HOMEPAGE = 'https://www.facebook.com'
_FB_HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded',
'Referer': _FB_HOMEPAGE,
'Accept-Language': 'en-US,en;',
}
_INITIAL_DATR_REGEX = r'_js_datr\",\"([^\"]+)'
_INITIAL_LSD_REGEX = r'<input.*?name=\"lsd\".*?value=\"([^\"]+)[^>]*>'
    def __init__(self, **kwargs):
        """Initialise the downloader and prime a Facebook session.

        Fetches the Facebook homepage once to scrape the ``datr`` cookie and
        the ``lsd`` form token out of the markup; both are required by the
        later Tahoe/GraphQL API calls, which POST ``self.data``.

        :raises Exception: if the ``datr`` cookie or the ``lsd`` token cannot
            be found in the homepage response.
        """
        super().__init__(**kwargs)
        # update headers for all subsequent FB requests
        self.update_session_headers(self._FB_HEADERS)
        initial_data = self._session_get(
            self._FB_HOMEPAGE,
            headers=self._FB_HEADERS, allow_redirects=False).text
        # the device token is embedded as a JS literal, not a Set-Cookie header
        datr = re.search(self._INITIAL_DATR_REGEX, initial_data)
        if datr:
            datr = datr.group(1)
        else:
            print('unable to get datr cookie')
            raise Exception  # TODO raise a specific, descriptive error
        sb = self.get_cookie_value('sb')
        fr = self.get_cookie_value('fr')
        # CSRF-style token taken from a hidden form input
        lsd_info = re.search(self._INITIAL_LSD_REGEX, initial_data)
        if not lsd_info:
            print('no lsd info')
            raise Exception  # TODO raise a specific, descriptive error
        lsd = lsd_info.group(1)
        request_headers = {
            # TODO sb and fr may be unnecessary here
            'Cookie': 'sb={}; fr={}; datr={};'.format(sb, fr, datr)
        }
        self.update_session_headers(request_headers)
        # base form fields sent with every API POST
        self.data = {
            # TODO may also need jazoest and other hidden form fields
            '__a': 1,  # TODO needed?
            'lsd': lsd,
        }
_NAME = 'facebook.com'
# Regex provided by youtube-dl
_VALID_URL = r'''(?x)
(?:
https?://
(?:[\w-]+\.)?(?:facebook\.com)/
(?:[^#]*?\#!/)?
(?:[^/]+/videos/(?:[^/]+/)?)
)
(?P<id>[0-9]+)
'''
_TESTS = [
]
_VIDEO_PAGE_TAHOE_TEMPLATE = _FB_HOMEPAGE + \
'/video/tahoe/async/{}/?chain=true&isvideo=true&payloadtype=primary'
def _parse_fb_json(self, response):
text_to_parse = remove_prefixes(response.text, 'for (;;);')
return json.loads(text_to_parse)
_VOD_COMMENTS_API = _FB_HOMEPAGE + '/videos/vodcomments/'
_GRAPH_API = _FB_HOMEPAGE + '/api/graphql/'
_VIDEO_URL_FORMAT = _FB_HOMEPAGE + '/video.php?v={}'
# _VIDEO_TITLE_REGEX = r'<meta\s+name=["\']description["\']\s+content=["\'](.*?)["\']\s*/>'
    def _attempt_fb_retrieve(self, url, max_attempts, retry_timeout, fb_json=False, **post_kwargs):
        """POST to *url* and return the decoded JSON payload, retrying up to
        *max_attempts* times on network errors or undecodable bodies.

        :param fb_json: if True, strip Facebook's ``for (;;);`` prefix via
            :meth:`_parse_fb_json` instead of plain ``response.json()``.
        :param post_kwargs: forwarded to the underlying session POST.
        """
        for attempt_number in attempts(max_attempts):
            try:
                response = self._session_post(url, **post_kwargs)
                if fb_json:
                    return self._parse_fb_json(response)
                else:
                    return response.json()
            except JSONDecodeError as e:
                # Facebook sometimes serves HTML (e.g. rate-limit pages);
                # include the body so the retry message is actionable.
                self.retry(attempt_number, max_attempts, e, retry_timeout,
                           text='Unable to parse JSON: `{}`'.format(response.text))
            except RequestException as e:
                self.retry(attempt_number, max_attempts, e, retry_timeout)
    def _get_initial_info(self, video_id, params):
        """Collect video metadata (title, username, duration, live status).

        Queries the Tahoe endpoint, parses the returned HTML fragment for the
        title/username spans, and falls back to the video page's ``<title>``
        when the spans are absent. Duration is read from the DASH manifest,
        so ``info['duration']`` is only set for non-live videos.

        :raises Exception: when no ``VideoConfig`` data can be found.
        """
        info = {}
        max_attempts = params.get('max_attempts')
        retry_timeout = params.get('retry_timeout')
        # TODO remove duplication - many similar methods
        json_data = self._attempt_fb_retrieve(
            self._VIDEO_PAGE_TAHOE_TEMPLATE.format(video_id),
            max_attempts,
            retry_timeout,
            True,
            headers=self._FB_HEADERS, data=self.data
        )
        # title/username are embedded in an HTML fragment inside the JSON
        markup = multi_get(json_data, 'payload', 'video', 'markup', '__html')
        video_markup = ET.fromstring(markup)
        tags = [x.text for x in video_markup.findall(
            './/span[@class="_50f7"]')]
        if len(tags) >= 2:
            info['title'] = tags[0]
            info['username'] = tags[1]
        else:
            # fallback: scrape "<username> - <title>" from the page title
            video_page_url = self._VIDEO_URL_FORMAT.format(video_id)
            for attempt_number in attempts(max_attempts):
                try:
                    html = self._session_get(video_page_url).text
                    match = get_title_of_webpage(html)
                    if match:
                        title_info = match.split(' - ', 1)
                        if len(title_info) == 2:
                            info['username'] = title_info[0]
                            info['title'] = title_info[1]
                        break
                except RequestException as e:
                    self.retry(attempt_number, max_attempts, e, retry_timeout)
        # find the VideoConfig instance that matches this video
        instances = multi_get(json_data, 'jsmods', 'instances')
        video_data = {}
        for item in instances:
            if try_get(item, lambda x: x[1][0]) == 'VideoConfig':
                video_item = item[2][0]
                if video_item.get('video_id'):
                    video_data = video_item['videoData'][0]
                    break
        if not video_data:
            print('unable to get video data')
            raise Exception  # TODO raise a specific, descriptive error
        dash_manifest = video_data.get('dash_manifest')
        if dash_manifest:  # only present when the video is not live
            dash_manifest_xml = ET.fromstring(dash_manifest)
            # ISO-8601 duration, e.g. "PT1H2M3S"
            info['duration'] = isodate.parse_duration(
                dash_manifest_xml.attrib['mediaPresentationDuration']).total_seconds()
        info['is_live'] = video_data['is_live_stream']
        return info
@staticmethod
def _parse_feedback(feedback):
new_feedback = {}
edges = multi_get(feedback, 'top_reactions', 'edges')
if not edges:
return new_feedback
new_feedback['reaction_types'] = []
for edge in edges:
node = edge.get('node')
reaction_item = {
'key': node.get('key'),
'id': node.get('id'),
'name': node.get('reaction_type'),
'count': edge.get('reaction_count')
}
new_feedback['reaction_types'].append(reaction_item)
new_feedback['total_count'] = multi_get(feedback, 'reactors', 'count')
new_feedback['total_count_reduced'] = multi_get(
feedback, 'reactors', 'count_reduced')
return new_feedback
@staticmethod
def get_text(item):
return item.get('text') if item else None
@staticmethod
def parse_image(item):
return BaseChatDownloader.create_image(item.get('uri'), item.get('width'), item.get('height'))
@staticmethod
def get_uri(item):
return item.get('uri')
@staticmethod
def _parse_attachment_info(original_item):
item = {}
if isinstance(original_item, (list, tuple)) and len(original_item) > 0:
original_item = original_item[0]
if not original_item:
return item
for key in original_item:
BaseChatDownloader.remap(
item, FacebookChatDownloader._TARGET_MEDIA_REMAPPING, key, original_item[key])
# VideoTipJarPayment
quantity = item.get('quantity')
if quantity:
item['text'] = 'Sent {} Star{}'.format(
quantity, 's' if quantity != 1 else '')
# For photos:
blurred_image = item.pop('blurred_image', None)
massive_image = item.pop('massive_image', None)
if blurred_image and massive_image:
item['text'] = BaseChatDownloader.create_image(
blurred_image,
massive_image.get('width'),
massive_image.get('height')
)
# style_infos
donation_comment_text = item.pop('donation_comment_text', None)
if donation_comment_text:
entity = try_get(donation_comment_text,
lambda x: x['ranges'][0]['entity']) or {}
for key in entity:
BaseChatDownloader.remap(
item, FacebookChatDownloader._TARGET_MEDIA_REMAPPING, key, entity[key])
item['text'] = donation_comment_text.get('text')
# DEBUGGING
original_type_name = original_item.get('__typename')
if original_type_name not in FacebookChatDownloader._KNOWN_ATTACHMENT_TYPES:
print('debug')
print('unknown attachment type:', original_type_name)
print(original_item)
print(item)
input()
return item
    @staticmethod
    def _parse_target(media):
        """Parse a 'target' node.

        NOTE(review): currently a stub — *media* is ignored and an empty dict
        is returned. Flesh this out or remove it once target parsing is decided.
        """
        item = {}
        return item
    @staticmethod
    def _parse_author_badges(item):
        """Convert a raw identity-badge node into the downloader's badge dict.

        Builds three icon entries: the small badge asset, the coloured
        information asset (both 24x24, homepage-relative paths), and the
        36x36 'large' multiple-badge asset (already an absolute URI —
        presumably; TODO confirm, it is not prefixed like the others).
        """
        keys = (('badge_asset', 'small'), ('information_asset', 'colour'))
        icons = list(map(lambda x: BaseChatDownloader.create_image(
            FacebookChatDownloader._FB_HOMEPAGE + item.get(x[0]), 24, 24, x[1]), keys))
        icons.append(BaseChatDownloader.create_image(
            item.get('multiple_badge_asset'), 36, 36, 'large'))
        return {
            'title': item.get('text'),
            'alternative_title': item.get('information_title'),
            'description': item.get('information_description'),
            'icons': icons,
            'icon_name': item.get('identity_badge_type')
        }
_ATTACHMENT_REMAPPING = {
'url': 'url', # facebook redirect url,
'source': r('source', get_text),
'title_with_entities': r('title', get_text),
'target': r('target', _parse_attachment_info),
'media': r('media', _parse_attachment_info),
'style_infos': r('style_infos', _parse_attachment_info),
'attachment_text': r('text', get_text),
}
_IGNORE_ATTACHMENT_KEYS = [
'tracking',
'action_links'
]
_KNOWN_ATTACHMENT_KEYS = set(
list(_ATTACHMENT_REMAPPING.keys()) + _IGNORE_ATTACHMENT_KEYS)
@staticmethod
def _parse_attachment_styles(item):
parsed = {}
attachment = multi_get(item, 'style_type_renderer', 'attachment')
if not attachment:
# TODO debug log
print('NO ATTACHMENT')
print(item)
return parsed
# set texts:
for key in attachment:
BaseChatDownloader.remap(
parsed, FacebookChatDownloader._ATTACHMENT_REMAPPING, key, attachment[key])
for key in ('target', 'media', 'style_infos'):
if parsed.get(key) == {}:
parsed.pop(key)
missing_keys = attachment.keys() - FacebookChatDownloader._KNOWN_ATTACHMENT_KEYS
if missing_keys:
print('MISSING ATTACHMENT KEYS:', missing_keys)
print(item)
print(parsed)
input()
return parsed
_TARGET_MEDIA_REMAPPING = {
'id': 'id',
'__typename': r('type', camel_case_split),
'fallback_image': r('image', parse_image),
'is_playable': 'is_playable',
'url': 'url',
'mobileUrl': 'mobile_url',
# Sticker
'pack': 'pack',
'label': 'label',
'image': r('image', parse_image),
# VideoTipJarPayment
'stars_image_on_star_quantity': 'icon',
'spark_quantity': 'quantity',
# Page
'name': 'name',
'category_name': 'category',
'address': 'address',
'overall_star_rating': 'overall_star_rating',
'profile_picture': r('profile_picture', get_uri),
# Photo
'accessibility_caption': 'accessibility_caption',
'blurred_image': r('blurred_image', get_uri),
'massive_image': 'massive_image',
# FundraiserForStoryDonationAttachmentStyleInfo
'donation_comment_text': 'donation_comment_text'
}
_KNOWN_ATTACHMENT_TYPES = [
'Sticker',
'VideoTipJarPayment',
'Page',
'Group',
'ProfilePicAttachmentMedia',
'User',
'Photo',
'ExternalUrl',
'GenericAttachmentMedia',
'ChatCommandResult',
'CommentMessageInfo',
'FundraiserForStoryDonationAttachmentStyleInfo'
]
_REMAPPING = {
'id': 'message_id',
'community_moderation_state': 'community_moderation_state',
# attachments
'author': 'author',
'feedback': r('reactions', _parse_feedback),
'created_time': r('timestamp', lambda x: x * 1000000),
'upvote_downvote_total': 'upvote_downvote_total',
'is_author_banned_by_content_owner': 'is_author_banned',
'is_author_original_poster': 'is_author_original_poster',
'is_author_bot': 'is_author_bot',
'is_author_non_coworker': 'is_author_non_coworker',
# if banned, ban_action?
'comment_parent': 'comment_parent',
'edit_history': r('number_of_edits', lambda x: x.get('count')),
'timestamp_in_video': 'time_in_seconds',
'written_while_video_was_live': 'written_while_video_was_live',
'translatability_for_viewer': r('message_dialect', lambda x: x.get('source_dialect_name')),
'url': 'message_url',
'body': r('message', get_text),
'identity_badges_web': r('author_badges', lambda x: list(map(FacebookChatDownloader._parse_author_badges, x))),
'attachments': r('attachments', lambda x: list(map(FacebookChatDownloader._parse_attachment_styles, x)))
}
_AUTHOR_REMAPPING = {
'id': 'id',
'name': 'name',
'__typename': r('type', camel_case_split),
'url': 'url',
'is_verified': 'is_verified',
'gender': r('gender', lambda x: x.lower()),
'short_name': 'short_name'
}
    @ staticmethod
    def _parse_live_stream_node(node):
        """Convert a raw GraphQL comment node into a normalised message dict.

        Remaps the node via ``_REMAPPING``, nests author fields (remapped via
        ``_AUTHOR_REMAPPING``, plus profile-picture image entries), recurses
        into the parent comment as 'in_reply_to', derives 'time_text', and
        strips empty message/reactions/attachments entries.
        """
        info = {}
        for key in node:
            BaseChatDownloader.remap(
                info, FacebookChatDownloader._REMAPPING, key, node[key])
        # nest all author-related fields under info['author']
        author_info = info.pop('author', {})
        BaseChatDownloader.move_to_dict(info, 'author', create_when_empty=True)
        for key in author_info:
            BaseChatDownloader.remap(
                info['author'], FacebookChatDownloader._AUTHOR_REMAPPING, key, author_info[key])
        if 'profile_picture_depth_0' in author_info:
            # depth 0 -> 32px avatar, depth 1 -> 24px avatar
            info['author']['images'] = []
            for size in ((0, 32), (1, 24)):
                url = multi_get(
                    author_info, 'profile_picture_depth_{}'.format(size[0]), 'uri')
                info['author']['images'].append(
                    BaseChatDownloader.create_image(url, size[1], size[1]))
        # a parent comment is parsed recursively into 'in_reply_to'
        in_reply_to = info.pop('comment_parent', None)
        if isinstance(in_reply_to, dict) and in_reply_to:
            info['in_reply_to'] = FacebookChatDownloader._parse_live_stream_node(
                in_reply_to)
        time_in_seconds = info.get('time_in_seconds')
        if time_in_seconds is not None:
            info['time_text'] = seconds_to_time(time_in_seconds)
        message = info.get('message')
        if message:
            info['message'] = message
            info['message_type'] = 'text_message'
        else:
            info.pop('message', None)  # remove if empty
        # remove the following if empty:
        if info.get('reactions') == {}:  # no reactions
            info.pop('reactions')
        if info.get('attachments') == []:
            info.pop('attachments')
        return info
    def _get_live_chat_messages_by_video_id(self, video_id, params):
        """Yield live-chat messages for an active stream, polling forever.

        Repeatedly POSTs the GraphQL top-level-comments query and yields each
        newly seen comment (deduplicated against the last *buffer_size* ids).
        NOTE(review): the loop never sleeps between polls and never
        terminates on its own — the caller is expected to stop consuming.
        """
        max_attempts = params.get('max_attempts')
        retry_timeout = params.get('retry_timeout')
        buffer_size = 25  # max num comments returned per api call
        variables = {
            'videoID': video_id
        }
        data = {
            'variables': json.dumps(variables),
            # doc_id identifies which persisted GraphQL query to run
            'doc_id': '4889623951078943',
        }
        data.update(self.data)
        first_try = True
        last_ids = []  # ring buffer of recently yielded comment ids
        while True:
            json_data = self._attempt_fb_retrieve(
                self._GRAPH_API,
                max_attempts,
                retry_timeout,
                headers=self._FB_HEADERS, data=data
            )
            feedback = multi_get(json_data, 'data', 'video', 'feedback') or {}
            if not feedback:
                print('no feedback')  # TODO use proper debug logging
                print(json_data, flush=True)
                continue
            top_level_comments = multi_get(
                json_data, 'data', 'video', 'feedback', 'top_level_comments')
            # API returns newest first; reverse to yield in chat order
            edges = top_level_comments.get('edges')[::-1]
            errors = json_data.get('errors')
            if errors:
                # TODO will usually resume getting chat; maybe add a timeout?
                print('ERRORS DETECTED')
                print(errors)
                continue
            # TODO - get pagination working (end_cursor from page_info)
            num_to_add = 0
            for edge in edges:
                node = edge.get('node')
                if not node:
                    # TODO use proper debug logging
                    print('no node found in edge')
                    print(edge)
                    continue
                comment_id = node.get('id')
                # skip items that have already been yielded
                if comment_id in last_ids:
                    continue
                last_ids.append(comment_id)
                last_ids = last_ids[-buffer_size:]  # keep only the newest ids
                if not node:
                    # NOTE(review): dead branch — `node` was already checked above
                    print('no node', edge)
                    continue
                parsed_node = FacebookChatDownloader._parse_live_stream_node(
                    node)
                num_to_add += 1
                yield parsed_node
            # a full page of new items (after the first poll) suggests we are
            # polling slower than chat is arriving
            if num_to_add >= buffer_size and not first_try:
                log(
                    'warning',
                    'Messages may be coming in faster than requests are being made.'
                )
            if not top_level_comments:
                print('err2')  # TODO use proper debug logging
                print(json_data)
            if first_try:
                first_try = False
def _get_chat_replay_messages_by_video_id(self, video_id, max_duration, params):
max_attempts = params.get('max_attempts')
retry_timeout = params.get('retry_timeout')
# useful tool (convert curl to python request)
# https://curl.trillworks.com/
# timeout_duration = 10 # TODO make this modifiable
initial_request_params = (
('eft_id', video_id),
('target_ufi_instance_id', 'u_2_1'),
# ('should_backfill', 'false'), # used when seeking? - # TODO true on first try?
)
time_increment = 60 # Facebook gets messages by the minute
# TODO make this modifiable
start_time = ensure_seconds(
params.get('start_time'), 0)
end_time = ensure_seconds(
params.get('end_time'), float('inf'))
next_start_time = max(start_time, 0)
end_time = min(end_time, max_duration)
# print(next_start_time, end_time, type(next_start_time), type(end_time))
# return
# total = []
while True:
next_end_time = min(next_start_time + time_increment, end_time)
times = (('start_time', next_start_time),
('end_time', next_end_time))
# print(times, flush=True)
request_params = initial_request_params + times
json_data = self._attempt_fb_retrieve(
self._VOD_COMMENTS_API,
max_attempts,
retry_timeout,
True,
headers=self._FB_HEADERS, params=request_params, data=self.data
)
payloads = multi_get(json_data, 'payload', 'ufipayloads')
if not payloads:
continue
# TODO debug
# print('no comments between',next_start_time, next_end_time, flush=True)
# print('err1')
# print(json_data)
next_start_time = next_end_time
if next_start_time >= end_time:
print('end')
return
for payload in payloads:
time_offset = payload.get('timeoffset')
# print(test)
ufipayload = payload.get('ufipayload')
if not ufipayload:
print('no ufipayload', payload)
continue
# ['comments'][0]['body']['text']
comment = try_get(ufipayload, lambda x: x['comments'][0])
if not comment:
# TODO debug
continue
# pinned_comments = ufipayload.get('pinnedcomments')
profile = try_get_first_value(ufipayload['profiles'])
text = comment['body']['text'] # safe_convert_text()
temp = {
'author': {
'name': profile.get('name')
},
'time_in_seconds': time_offset,
'time_text': seconds_to_time(time_offset),
'message': text
}
yield temp
def get_chat_by_video_id(self, video_id, params):
initial_info = self._get_initial_info(video_id, params)
start_time = params.get('start_time')
end_time = params.get('end_time')
is_live = initial_info.get('is_live')
# if start or end time specified, use chat replay...
# The tool works for both active and finished live streams.
# if start/end time are specified, vods will be prioritised
# if is live stream and no start/end time specified
if is_live and not start_time and not end_time:
generator = self._get_live_chat_messages_by_video_id(
video_id, params)
else:
max_duration = initial_info.get('duration', float('inf'))
generator = self._get_chat_replay_messages_by_video_id(
video_id, max_duration, params)
return Chat(
generator,
title=initial_info.get('title'),
duration=initial_info.get('duration'),
is_live=is_live,
author=initial_info.get('author'),
)
def get_chat(self, **kwargs):
url = kwargs.get('url')
match = re.search(self._VALID_URL, url)
if match:
if match.group('id'): # normal youtube video
return self.get_chat_by_video_id(match.group('id'), kwargs)
else: # TODO add profile, etc.
pass
| 31.697248 | 119 | 0.557081 | import json
from json.decoder import JSONDecodeError
import xml.etree.ElementTree as ET
import isodate
import re
from .common import (
Chat,
BaseChatDownloader,
Remapper as r
)
from requests.exceptions import RequestException
from ..utils import (
remove_prefixes,
multi_get,
try_get_first_value,
try_get,
seconds_to_time,
camel_case_split,
ensure_seconds,
attempts,
get_title_of_webpage,
log
)
class FacebookChatDownloader(BaseChatDownloader):
    """Chat downloader for Facebook live streams and VODs.

    On construction it scrapes facebook.com for the ``datr`` cookie and the
    ``lsd`` form token that subsequent API calls require, then ``get_chat``
    dispatches to either the live GraphQL poller or the VOD comments
    endpoint, depending on the stream state and the requested time range.
    """

    _FB_HOMEPAGE = 'https://www.facebook.com'
    _FB_HEADERS = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Referer': _FB_HOMEPAGE,
        'Accept-Language': 'en-US,en;',
    }

    # Extracts the value of the `_js_datr` cookie embedded in the homepage HTML.
    _INITIAL_DATR_REGEX = r'_js_datr\",\"([^\"]+)'
    # Extracts the hidden `lsd` form token from the homepage HTML.
    _INITIAL_LSD_REGEX = r'<input.*?name=\"lsd\".*?value=\"([^\"]+)[^>]*>'

    def __init__(self, **kwargs):
        """Initialise the session and harvest the datr cookie and lsd token.

        Raises a bare ``Exception`` when either value cannot be scraped
        (NOTE(review): should raise a specific error type; see TODOs below).
        """
        super().__init__(**kwargs)
        # update headers for all subsequent FB requests
        self.update_session_headers(self._FB_HEADERS)

        initial_data = self._session_get(
            self._FB_HOMEPAGE,
            headers=self._FB_HEADERS, allow_redirects=False).text

        datr = re.search(self._INITIAL_DATR_REGEX, initial_data)
        if datr:
            datr = datr.group(1)
        else:
            print('unable to get datr cookie')
            raise Exception  # TODO

        sb = self.get_cookie_value('sb')
        fr = self.get_cookie_value('fr')
        # print('sb:', sb, flush=True)
        # print('fr:', fr, flush=True)
        # print('datr:', datr, flush=True)

        lsd_info = re.search(self._INITIAL_LSD_REGEX, initial_data)
        if not lsd_info:
            print('no lsd info')
            raise Exception  # TODO

        lsd = lsd_info.group(1)
        # print('lsd:', lsd, flush=True)

        request_headers = {
            # TODO sb and fr unnecessary?
            # wd=1122x969;
            'Cookie': 'sb={}; fr={}; datr={};'.format(sb, fr, datr)
        }
        self.update_session_headers(request_headers)

        # Base form data sent with every POST request to Facebook.
        self.data = {
            # TODO need things like jazoest? (and other stuff from hidden elements/html)
            '__a': 1,  # TODO needed?
            'lsd': lsd,
        }

    _NAME = 'facebook.com'

    # Regex provided by youtube-dl
    _VALID_URL = r'''(?x)
        (?:
            https?://
                (?:[\w-]+\.)?(?:facebook\.com)/
                (?:[^#]*?\#!/)?
                (?:[^/]+/videos/(?:[^/]+/)?)
        )
        (?P<id>[0-9]+)
        '''

    _TESTS = [
    ]

    _VIDEO_PAGE_TAHOE_TEMPLATE = _FB_HOMEPAGE + \
        '/video/tahoe/async/{}/?chain=true&isvideo=true&payloadtype=primary'

    def _parse_fb_json(self, response):
        """Parse a Facebook response body, stripping the 'for (;;);' XSSI prefix."""
        text_to_parse = remove_prefixes(response.text, 'for (;;);')
        return json.loads(text_to_parse)

    _VOD_COMMENTS_API = _FB_HOMEPAGE + '/videos/vodcomments/'
    _GRAPH_API = _FB_HOMEPAGE + '/api/graphql/'
    _VIDEO_URL_FORMAT = _FB_HOMEPAGE + '/video.php?v={}'

    # _VIDEO_TITLE_REGEX = r'<meta\s+name=["\']description["\']\s+content=["\'](.*?)["\']\s*/>'

    def _attempt_fb_retrieve(self, url, max_attempts, retry_timeout, fb_json=False, **post_kwargs):
        """POST `url` up to `max_attempts` times and return parsed JSON.

        When `fb_json` is true the XSSI-prefixed Facebook format is expected,
        otherwise plain JSON. Retries on JSON parse and request errors.
        """
        for attempt_number in attempts(max_attempts):
            try:
                response = self._session_post(url, **post_kwargs)
                if fb_json:
                    return self._parse_fb_json(response)
                else:
                    return response.json()
            except JSONDecodeError as e:
                self.retry(attempt_number, max_attempts, e, retry_timeout,
                           text='Unable to parse JSON: `{}`'.format(response.text))
            except RequestException as e:
                self.retry(attempt_number, max_attempts, e, retry_timeout)

    def _get_initial_info(self, video_id, params):
        """Fetch title/username, duration (when available) and live status
        for `video_id` from the "tahoe" video page endpoint."""
        info = {}
        max_attempts = params.get('max_attempts')
        retry_timeout = params.get('retry_timeout')
        # TODO remove duplication - many similar methods
        json_data = self._attempt_fb_retrieve(
            self._VIDEO_PAGE_TAHOE_TEMPLATE.format(video_id),
            max_attempts,
            retry_timeout,
            True,
            headers=self._FB_HEADERS, data=self.data
        )
        # print(json_data)
        markup = multi_get(json_data, 'payload', 'video', 'markup', '__html')
        video_markup = ET.fromstring(markup)
        # Title and uploader are rendered as the first two "_50f7" spans.
        tags = [x.text for x in video_markup.findall(
            './/span[@class="_50f7"]')]
        if len(tags) >= 2:
            info['title'] = tags[0]
            info['username'] = tags[1]
        else:
            # Fallback: scrape "<username> - <title>" from the video page <title>.
            video_page_url = self._VIDEO_URL_FORMAT.format(video_id)
            for attempt_number in attempts(max_attempts):
                try:
                    html = self._session_get(video_page_url).text
                    match = get_title_of_webpage(html)
                    if match:
                        title_info = match.split(' - ', 1)
                        if len(title_info) == 2:
                            info['username'] = title_info[0]
                            info['title'] = title_info[1]
                        break
                except RequestException as e:
                    self.retry(attempt_number, max_attempts, e, retry_timeout)

        # Find the VideoConfig instance, which carries the stream metadata.
        instances = multi_get(json_data, 'jsmods', 'instances')
        video_data = {}
        for item in instances:
            if try_get(item, lambda x: x[1][0]) == 'VideoConfig':
                video_item = item[2][0]
                if video_item.get('video_id'):
                    video_data = video_item['videoData'][0]
                    # print(video_data)
                    break
        # print(video_data)
        if not video_data:
            print('unable to get video data')
            raise Exception

        dash_manifest = video_data.get('dash_manifest')
        if dash_manifest:  # when not live, this returns
            dash_manifest_xml = ET.fromstring(dash_manifest)
            # Duration is an ISO-8601 duration in the MPD root attributes.
            info['duration'] = isodate.parse_duration(
                dash_manifest_xml.attrib['mediaPresentationDuration']).total_seconds()
        info['is_live'] = video_data['is_live_stream']
        return info

    @staticmethod
    def _parse_feedback(feedback):
        """Convert a GraphQL `feedback` node into a reactions summary dict."""
        new_feedback = {}
        edges = multi_get(feedback, 'top_reactions', 'edges')
        if not edges:
            return new_feedback
        new_feedback['reaction_types'] = []
        for edge in edges:
            node = edge.get('node')
            reaction_item = {
                'key': node.get('key'),
                'id': node.get('id'),
                'name': node.get('reaction_type'),
                'count': edge.get('reaction_count')
            }
            new_feedback['reaction_types'].append(reaction_item)
        new_feedback['total_count'] = multi_get(feedback, 'reactors', 'count')
        new_feedback['total_count_reduced'] = multi_get(
            feedback, 'reactors', 'count_reduced')
        return new_feedback

    @staticmethod
    def get_text(item):
        """Return item['text'], or None when item is falsy."""
        return item.get('text') if item else None

    @staticmethod
    def parse_image(item):
        """Build an image dict from a node with 'uri'/'width'/'height' keys."""
        return BaseChatDownloader.create_image(item.get('uri'), item.get('width'), item.get('height'))

    @staticmethod
    def get_uri(item):
        """Return item['uri'] (None when absent)."""
        return item.get('uri')

    @staticmethod
    def _parse_attachment_info(original_item):
        """Remap an attachment target/media/style_infos node via
        _TARGET_MEDIA_REMAPPING, then post-process known special cases."""
        item = {}
        # Some payloads wrap the node in a one-element list/tuple.
        if isinstance(original_item, (list, tuple)) and len(original_item) > 0:
            original_item = original_item[0]
        if not original_item:
            return item
        for key in original_item:
            BaseChatDownloader.remap(
                item, FacebookChatDownloader._TARGET_MEDIA_REMAPPING, key, original_item[key])

        # VideoTipJarPayment
        quantity = item.get('quantity')
        if quantity:
            item['text'] = 'Sent {} Star{}'.format(
                quantity, 's' if quantity != 1 else '')

        # For photos:
        blurred_image = item.pop('blurred_image', None)
        massive_image = item.pop('massive_image', None)
        if blurred_image and massive_image:
            item['text'] = BaseChatDownloader.create_image(
                blurred_image,
                massive_image.get('width'),
                massive_image.get('height')
            )

        # style_infos
        donation_comment_text = item.pop('donation_comment_text', None)
        if donation_comment_text:
            entity = try_get(donation_comment_text,
                             lambda x: x['ranges'][0]['entity']) or {}
            for key in entity:
                BaseChatDownloader.remap(
                    item, FacebookChatDownloader._TARGET_MEDIA_REMAPPING, key, entity[key])
            item['text'] = donation_comment_text.get('text')

        # DEBUGGING
        # NOTE(review): input() blocks the process when an unknown type is
        # seen -- debugging aid only; should not ship in library code.
        original_type_name = original_item.get('__typename')
        if original_type_name not in FacebookChatDownloader._KNOWN_ATTACHMENT_TYPES:
            print('debug')
            print('unknown attachment type:', original_type_name)
            print(original_item)
            print(item)
            input()

        return item

    @staticmethod
    def _parse_target(media):
        """Placeholder -- target parsing not implemented yet; returns {}."""
        item = {}
        return item

    @staticmethod
    def _parse_author_badges(item):
        """Convert an identity-badge node into the common badge dict
        (title, descriptions and icon images at several sizes)."""
        keys = (('badge_asset', 'small'), ('information_asset', 'colour'))
        icons = list(map(lambda x: BaseChatDownloader.create_image(
            FacebookChatDownloader._FB_HOMEPAGE + item.get(x[0]), 24, 24, x[1]), keys))
        icons.append(BaseChatDownloader.create_image(
            item.get('multiple_badge_asset'), 36, 36, 'large'))

        return {
            'title': item.get('text'),
            'alternative_title': item.get('information_title'),
            'description': item.get('information_description'),
            'icons': icons,
            # badge_asset
            # multiple_badge_asset
            # information_asset
            'icon_name': item.get('identity_badge_type')
        }

    # Key remapping for top-level attachment nodes.
    _ATTACHMENT_REMAPPING = {
        'url': 'url',  # facebook redirect url,
        'source': r('source', get_text),
        'title_with_entities': r('title', get_text),

        'target': r('target', _parse_attachment_info),
        'media': r('media', _parse_attachment_info),
        'style_infos': r('style_infos', _parse_attachment_info),

        'attachment_text': r('text', get_text),
    }

    _IGNORE_ATTACHMENT_KEYS = [
        'tracking',
        'action_links'
    ]

    _KNOWN_ATTACHMENT_KEYS = set(
        list(_ATTACHMENT_REMAPPING.keys()) + _IGNORE_ATTACHMENT_KEYS)

    @staticmethod
    def _parse_attachment_styles(item):
        """Remap one `style_type_renderer.attachment` node, drop empty
        sub-dicts, and flag any keys not covered by the remapping tables."""
        parsed = {}
        attachment = multi_get(item, 'style_type_renderer', 'attachment')
        if not attachment:
            # TODO debug log
            print('NO ATTACHMENT')
            print(item)
            return parsed

        # set texts:
        for key in attachment:
            BaseChatDownloader.remap(
                parsed, FacebookChatDownloader._ATTACHMENT_REMAPPING, key, attachment[key])

        for key in ('target', 'media', 'style_infos'):
            if parsed.get(key) == {}:
                parsed.pop(key)

        # NOTE(review): input() blocks the process -- debugging aid only.
        missing_keys = attachment.keys() - FacebookChatDownloader._KNOWN_ATTACHMENT_KEYS
        if missing_keys:
            print('MISSING ATTACHMENT KEYS:', missing_keys)
            print(item)
            print(parsed)
            input()

        return parsed

    # Key remapping for attachment target/media/style_infos sub-nodes.
    _TARGET_MEDIA_REMAPPING = {
        'id': 'id',
        '__typename': r('type', camel_case_split),
        'fallback_image': r('image', parse_image),
        'is_playable': 'is_playable',
        'url': 'url',

        'mobileUrl': 'mobile_url',


        # Sticker
        'pack': 'pack',
        'label': 'label',
        'image': r('image', parse_image),

        # VideoTipJarPayment

        'stars_image_on_star_quantity': 'icon',
        'spark_quantity': 'quantity',



        # Page
        'name': 'name',
        'category_name': 'category',
        'address': 'address',
        'overall_star_rating': 'overall_star_rating',

        'profile_picture': r('profile_picture', get_uri),

        # Photo
        'accessibility_caption': 'accessibility_caption',

        'blurred_image': r('blurred_image', get_uri),
        'massive_image': 'massive_image',


        # FundraiserForStoryDonationAttachmentStyleInfo
        'donation_comment_text': 'donation_comment_text'

    }

    _KNOWN_ATTACHMENT_TYPES = [
        'Sticker',
        'VideoTipJarPayment',

        'Page',
        'Group',
        'ProfilePicAttachmentMedia',
        'User',
        'Photo',

        'ExternalUrl',
        'GenericAttachmentMedia',

        'ChatCommandResult',

        'CommentMessageInfo',
        'FundraiserForStoryDonationAttachmentStyleInfo'
    ]

    # Key remapping for one comment ("live stream node").
    _REMAPPING = {
        'id': 'message_id',
        'community_moderation_state': 'community_moderation_state',

        # attachments

        'author': 'author',


        'feedback': r('reactions', _parse_feedback),
        'created_time': r('timestamp', lambda x: x * 1000000),


        'upvote_downvote_total': 'upvote_downvote_total',
        'is_author_banned_by_content_owner': 'is_author_banned',
        'is_author_original_poster': 'is_author_original_poster',
        'is_author_bot': 'is_author_bot',
        'is_author_non_coworker': 'is_author_non_coworker',
        # if banned, ban_action?

        'comment_parent': 'comment_parent',

        'edit_history': r('number_of_edits', lambda x: x.get('count')),


        'timestamp_in_video': 'time_in_seconds',
        'written_while_video_was_live': 'written_while_video_was_live',



        'translatability_for_viewer': r('message_dialect', lambda x: x.get('source_dialect_name')),


        'url': 'message_url',

        'body': r('message', get_text),

        'identity_badges_web': r('author_badges', lambda x: list(map(FacebookChatDownloader._parse_author_badges, x))),

        'attachments': r('attachments', lambda x: list(map(FacebookChatDownloader._parse_attachment_styles, x)))

    }

    _AUTHOR_REMAPPING = {
        'id': 'id',
        'name': 'name',
        '__typename': r('type', camel_case_split),
        'url': 'url',

        'is_verified': 'is_verified',

        'gender': r('gender', lambda x: x.lower()),
        'short_name': 'short_name'
    }

    @ staticmethod
    def _parse_live_stream_node(node):
        """Parse one comment node into the common message dict, including
        author info, badges, reply parent and timing fields."""
        # if info is None:
        #     info = {}
        info = {}
        for key in node:
            BaseChatDownloader.remap(
                info, FacebookChatDownloader._REMAPPING, key, node[key])

        author_info = info.pop('author', {})
        BaseChatDownloader.move_to_dict(info, 'author', create_when_empty=True)

        for key in author_info:
            BaseChatDownloader.remap(
                info['author'], FacebookChatDownloader._AUTHOR_REMAPPING, key, author_info[key])

        if 'profile_picture_depth_0' in author_info:
            info['author']['images'] = []
            for size in ((0, 32), (1, 24)):
                url = multi_get(
                    author_info, 'profile_picture_depth_{}'.format(size[0]), 'uri')
                info['author']['images'].append(
                    BaseChatDownloader.create_image(url, size[1], size[1]))

        # author_badges = info.pop('author_badges', None)
        # if author_badges:
        #     info['author']['badges'] = author_badges

        # Replies are parsed recursively.
        in_reply_to = info.pop('comment_parent', None)
        if isinstance(in_reply_to, dict) and in_reply_to:
            info['in_reply_to'] = FacebookChatDownloader._parse_live_stream_node(
                in_reply_to)

        time_in_seconds = info.get('time_in_seconds')
        if time_in_seconds is not None:
            info['time_text'] = seconds_to_time(time_in_seconds)

        message = info.get('message')
        if message:
            info['message'] = message
            info['message_type'] = 'text_message'
        else:
            info.pop('message', None)  # remove if empty

        # remove the following if empty:
        if info.get('reactions') == {}:  # no reactions
            info.pop('reactions')
        if info.get('attachments') == []:
            info.pop('attachments')
        # print("AAAAAAAA")
        # print(info.get('attachments'), node)

        return info

    def _get_live_chat_messages_by_video_id(self, video_id, params):
        """Generator: poll the GraphQL endpoint for live comments, yielding
        parsed message dicts and de-duplicating against recent IDs.

        NOTE(review): the while-loop polls continuously with no delay
        between requests -- confirm rate limiting happens elsewhere.
        """
        max_attempts = params.get('max_attempts')
        retry_timeout = params.get('retry_timeout')

        buffer_size = 25  # max num comments returned by api call
        # cursor = ''

        variables = {
            'videoID': video_id
        }
        data = {
            'variables': json.dumps(variables),
            'doc_id': '4889623951078943',  # specifies what API call this is?
            # 'cursor' : cursor
            # &first=12&after=<end_cursor>
        }
        data.update(self.data)

        # p = (), params=p

        first_try = True

        last_ids = []
        while True:
            json_data = self._attempt_fb_retrieve(
                self._GRAPH_API,
                max_attempts,
                retry_timeout,
                headers=self._FB_HEADERS, data=data
            )

            feedback = multi_get(json_data, 'data', 'video', 'feedback') or {}
            if not feedback:
                print('no feedback')  # TODO debug
                print(json_data, flush=True)
                continue

            top_level_comments = multi_get(
                json_data, 'data', 'video', 'feedback', 'top_level_comments')

            edges = top_level_comments.get('edges')[::-1]  # reverse order

            errors = json_data.get('errors')
            if errors:
                # TODO will usually resume getting chat..
                # maybe add timeout?
                print('ERRORS DETECTED')
                print(errors)
                continue

            # TODO - get pagination working
            # page_info = top_level_comments.get('page_info')
            # after = page_info.get('end_cursor')

            num_to_add = 0
            for edge in edges:
                node = edge.get('node')
                if not node:
                    # TODO debug
                    print('no node found in edge')
                    print(edge)
                    continue

                comment_id = node.get('id')

                # remove items that have already been parsed
                if comment_id in last_ids:
                    # print('=', end='', flush=True)
                    continue

                last_ids.append(comment_id)
                last_ids = last_ids[-buffer_size:]  # force x items

                if not node:
                    # TODO debug
                    print('no node', edge)
                    continue

                parsed_node = FacebookChatDownloader._parse_live_stream_node(
                    node)
                # TODO determine whether to add or not
                num_to_add += 1
                yield parsed_node

            # got 25 items, and this isn't the first one
            if num_to_add >= buffer_size and not first_try:
                log(
                    'warning',
                    'Messages may be coming in faster than requests are being made.'
                )

            if not top_level_comments:
                print('err2')
                print(json_data)

            if first_try:
                first_try = False

    def _get_chat_replay_messages_by_video_id(self, video_id, max_duration, params):
        """Generator: fetch VOD comments in 60-second windows between the
        requested start/end times, yielding simple message dicts.

        NOTE(review): when a window returns no payloads, `continue` runs
        without advancing next_start_time, so an empty window would be
        re-requested forever -- confirm whether this can occur in practice.
        """
        max_attempts = params.get('max_attempts')
        retry_timeout = params.get('retry_timeout')

        # useful tool (convert curl to python request)
        # https://curl.trillworks.com/
        # timeout_duration = 10 # TODO make this modifiable

        initial_request_params = (
            ('eft_id', video_id),
            ('target_ufi_instance_id', 'u_2_1'),
            # ('should_backfill', 'false'), # used when seeking? - # TODO true on first try?
        )

        time_increment = 60  # Facebook gets messages by the minute
        # TODO make this modifiable

        start_time = ensure_seconds(
            params.get('start_time'), 0)
        end_time = ensure_seconds(
            params.get('end_time'), float('inf'))

        next_start_time = max(start_time, 0)
        end_time = min(end_time, max_duration)

        # print(next_start_time, end_time, type(next_start_time), type(end_time))
        # return
        # total = []
        while True:
            next_end_time = min(next_start_time + time_increment, end_time)
            times = (('start_time', next_start_time),
                     ('end_time', next_end_time))
            # print(times, flush=True)

            request_params = initial_request_params + times

            json_data = self._attempt_fb_retrieve(
                self._VOD_COMMENTS_API,
                max_attempts,
                retry_timeout,
                True,
                headers=self._FB_HEADERS, params=request_params, data=self.data
            )

            payloads = multi_get(json_data, 'payload', 'ufipayloads')
            if not payloads:
                continue
                # TODO debug
                # print('no comments between',next_start_time, next_end_time, flush=True)
                # print('err1')
                # print(json_data)

            next_start_time = next_end_time
            if next_start_time >= end_time:
                print('end')
                return

            for payload in payloads:
                time_offset = payload.get('timeoffset')
                # print(test)

                ufipayload = payload.get('ufipayload')
                if not ufipayload:
                    print('no ufipayload', payload)
                    continue

                # ['comments'][0]['body']['text']
                comment = try_get(ufipayload, lambda x: x['comments'][0])
                if not comment:
                    # TODO debug
                    continue

                # pinned_comments = ufipayload.get('pinnedcomments')
                profile = try_get_first_value(ufipayload['profiles'])

                text = comment['body']['text']  # safe_convert_text()

                temp = {
                    'author': {
                        'name': profile.get('name')
                    },
                    'time_in_seconds': time_offset,
                    'time_text': seconds_to_time(time_offset),
                    'message': text
                }
                yield temp

    def get_chat_by_video_id(self, video_id, params):
        """Return a Chat object for `video_id`, choosing the live poller for
        active streams (when no time range is given) and the replay
        endpoint otherwise.

        NOTE(review): _get_initial_info never sets an 'author' key, so
        `author=` below is always None -- 'username' may be intended.
        """
        initial_info = self._get_initial_info(video_id, params)

        start_time = params.get('start_time')
        end_time = params.get('end_time')

        is_live = initial_info.get('is_live')

        # if start or end time specified, use chat replay...
        # The tool works for both active and finished live streams.
        # if start/end time are specified, vods will be prioritised
        # if is live stream and no start/end time specified
        if is_live and not start_time and not end_time:
            generator = self._get_live_chat_messages_by_video_id(
                video_id, params)
        else:
            max_duration = initial_info.get('duration', float('inf'))
            generator = self._get_chat_replay_messages_by_video_id(
                video_id, max_duration, params)

        return Chat(
            generator,
            title=initial_info.get('title'),
            duration=initial_info.get('duration'),
            is_live=is_live,
            author=initial_info.get('author'),
        )

    def get_chat(self, **kwargs):
        """Entry point: match the URL against _VALID_URL and dispatch to
        get_chat_by_video_id for recognised video URLs."""
        url = kwargs.get('url')

        match = re.search(self._VALID_URL, url)

        if match:
            if match.group('id'):  # normal youtube video
                return self.get_chat_by_video_id(match.group('id'), kwargs)

            else:  # TODO add profile, etc.
                pass
| true | true |
f73140c2dd100d80311a5c5ccca1e3c3caf4075d | 3,746 | py | Python | postcode_validator_uk/rules.py | ioannavlahou/postcode-validator-uk | e43b2919a7d7e940ae072b24ab5d07587e8e3df8 | [
"MIT"
] | 4 | 2020-02-08T15:02:00.000Z | 2020-11-22T19:35:11.000Z | postcode_validator_uk/rules.py | ioannavlahou/postcode-validator-uk | e43b2919a7d7e940ae072b24ab5d07587e8e3df8 | [
"MIT"
] | 8 | 2021-06-23T12:36:40.000Z | 2021-12-21T11:26:27.000Z | postcode_validator_uk/rules.py | ioannavlahou/postcode-validator-uk | e43b2919a7d7e940ae072b24ab5d07587e8e3df8 | [
"MIT"
] | 2 | 2020-12-04T10:47:07.000Z | 2021-06-08T20:45:45.000Z | import re
from .exceptions import InvalidPostcode
class PostcodeRule:
    """Base class for a single UK-postcode validation rule.

    Subclasses configure three class attributes:

    * ``attr_applied``        -- name of the postcode attribute inspected
                                 (e.g. ``'outward'`` or ``'inward'``).
    * ``applied_areas_regex`` -- pre-filter; the rule only applies when
                                 this pattern matches the attribute value.
    * ``rule_regex``          -- the validity pattern enforced when the
                                 rule applies.
    """

    attr_applied = None
    applied_areas_regex = None
    rule_regex = None

    def __init__(self, postcode):
        # The postcode object must expose the attribute named by attr_applied.
        self.postcode = postcode

    def validate(self):
        """Raise InvalidPostcode when the rule applies and is violated.

        Raises AttributeError when the inspected attribute is missing or
        empty; returns None when the rule does not apply or the value passes.
        """
        value = getattr(self.postcode, self.attr_applied, None)
        if not value:
            raise AttributeError(f"This entity has not attr {self.attr_applied}")
        if self.applied_areas_regex.match(value) and not self.rule_regex.match(value):
            raise InvalidPostcode
class SingleDigitDistrict(PostcodeRule):
    """
    Areas with only single-digit districts: BR, FY, HA, HD, HG, HR, HS, HX, JE, LD, SM, SR, WC, WN, ZE
    (although WC is always subdivided by a further letter, e.g. WC1A)
    """

    attr_applied = "outward"
    # Pre-filter: rule applies only to outward codes in the listed areas.
    applied_areas_regex = re.compile(r"^(BR|FY|HA|HD|HG|HR|HS|HX|JE|LD|SM|SR|WC|WN|ZE)")
    # Two letters + one digit; WC additionally needs a trailing letter (e.g. WC1A).
    rule_regex = re.compile(r"^(?!WC)[A-Z]{2}[0-9]$|^WC[0-9][A-Z]$")
class DoubleDigitDistrict(PostcodeRule):
    """Areas with only double-digit districts: AB, LL, SO"""

    attr_applied = "outward"
    # Pre-filter: rule applies only to the AB, LL and SO areas.
    applied_areas_regex = re.compile(r"^(AB|LL|SO)")
    # Exactly two letters followed by exactly two digits.
    rule_regex = re.compile(r"^[A-Z]{2}[0-9]{2}$")
class ZeroOrTenDistrict(PostcodeRule):
    """
    Areas with a district '0' (zero): BL, BS, CM, CR, FY, HA, PR, SL, SS
    (BS is the only area to have both a district 0 and a district 10)
    """

    attr_applied = "outward"
    # Pre-filter: rule fires for any two-letter area with district 0 or 10.
    applied_areas_regex = re.compile(r"^[A-Z]{2}(0|10)$")
    # Only the listed areas may use district 0; only BS may use district 10.
    rule_regex = re.compile(r"^(BL|BS|CM|CR|FY|HA|PR|SL|SS)0$|^BS10$")
class CentralLondonDistrict(PostcodeRule):
    """
    The following central London single-digit districts have been further divided by inserting a letter after
    the digit and before the space: EC1–EC4 (but not EC50), SW1, W1, WC1, WC2 and parts of E1 (E1W),
    N1 (N1C and N1P), NW1 (NW1W) and SE1 (SE1P).
    """

    attr_applied = "outward"
    # Pre-filter: rule fires for the listed districts followed by a letter.
    applied_areas_regex = re.compile(r"^(EC[0-9]|E1|N1|NW1|SE1|SW1|W1|WC1|WC2)[A-Z]")
    # BUGFIX: the original pattern used '|' inside character classes
    # (e.g. [C|P]), where it is a literal member, so '|' itself was accepted
    # as a subdivision letter. Plain optional letters are used instead.
    rule_regex = re.compile(
        r"^EC[1-4][A-Z]?$|^E1W?$|^N1[CP]?$|^NW1W?$|^SE1P?$|^SW1[A-Z]?$|^W1[A-Z]?$|^WC[1-2][A-Z]?$"
    )
class FirstLetter(PostcodeRule):
    """The letters Q, V and X are not used in the first position."""

    attr_applied = "outward"
    # Pre-filter: only fires on codes starting with a forbidden letter...
    applied_areas_regex = re.compile(r"^(Q|V|X)")
    # ...so this pattern can never match once the rule fires (always raises).
    rule_regex = re.compile(r"^(?!Q|V|X).*")
class SecondLetter(PostcodeRule):
    """The letters I, J and Z are not used in the second position."""

    attr_applied = "outward"
    # Pre-filter: only fires when the second letter is forbidden...
    applied_areas_regex = re.compile(r"^[A-Z](I|J|Z)")
    # ...so this pattern can never match once the rule fires (always raises).
    rule_regex = re.compile(r"^[A-Z](?!I|J|Z).*")
class ThirdLetter(PostcodeRule):
    """
    The only letters to appear in the third position are A, B, C, D, E, F, G, H, J, K, P, S, T, U and W
    when the structure starts with A9A.
    """

    attr_applied = "outward"
    # Pre-filter: rule fires only for the A9A outward structure.
    applied_areas_regex = re.compile(r"^[A-Z][0-9][A-Z]$")
    # Third character restricted to the allowed letter set.
    rule_regex = re.compile(r"^[A-Z][0-9](A|B|C|D|E|F|G|H|J|K|P|S|T|U|W)$")
class FourthLetter(PostcodeRule):
    """
    The only letters to appear in the fourth position are A, B, E, H, M, N, P, R, V, W, X and Y
    when the structure starts with AA9A.
    """

    attr_applied = "outward"
    # Pre-filter: rule fires only for the AA9A outward structure.
    applied_areas_regex = re.compile(r"^[A-Z]{2}[0-9][A-Z]$")
    # Fourth character restricted to the allowed letter set.
    rule_regex = re.compile(r"^[A-Z]{2}[0-9](A|B|E|H|M|N|P|R|V|W|X|Y)$")
class LastTwoLetter(PostcodeRule):
    """
    The final two letters do not use C, I, K, M, O or V, so as not to resemble digits
    or each other when hand-written.
    """

    attr_applied = "inward"
    # Pre-filter: rule fires for any digit + two-letter inward code.
    applied_areas_regex = re.compile(r"^[0-9][A-Z]{2}$")
    # BUGFIX: the original character class was written [A|B|D|...], where
    # '|' is a literal member, so inputs such as '1||' matched the pattern.
    # Alphabet = A-Z minus C, I, K, M, O and V.
    rule_regex = re.compile(r"^[0-9][ABDEFGHJLNPQRSTUWXYZ]{2}$")
| 32.017094 | 109 | 0.626535 | import re
from .exceptions import InvalidPostcode
class PostcodeRule:
    """Base class for a single UK-postcode validation rule.

    Subclasses configure three class attributes:
      * attr_applied        -- name of the postcode attribute inspected
                               ('outward' or 'inward').
      * applied_areas_regex -- pre-filter; the rule only applies when this
                               pattern matches the attribute value.
      * rule_regex          -- the validity pattern enforced when it applies.
    """

    attr_applied = None
    applied_areas_regex = None
    rule_regex = None

    def __init__(self, postcode):
        # The postcode object must expose the attribute named by attr_applied.
        self.postcode = postcode

    def validate(self):
        """Raise InvalidPostcode when the rule applies and is violated.

        Raises AttributeError when the inspected attribute is missing or
        empty; returns None when the rule does not apply or the value passes.
        """
        postcode_attr_value = getattr(self.postcode, self.attr_applied, None)

        if not postcode_attr_value:
            raise AttributeError(f"This entity has not attr {self.attr_applied}")

        if not self.applied_areas_regex.match(postcode_attr_value):
            return

        if not self.rule_regex.match(postcode_attr_value):
            raise InvalidPostcode
class SingleDigitDistrict(PostcodeRule):
    """Areas with only single-digit districts: BR, FY, HA, HD, HG, HR, HS,
    HX, JE, LD, SM, SR, WC, WN, ZE (WC is always subdivided by a further
    letter, e.g. WC1A)."""

    attr_applied = "outward"
    # Pre-filter: rule applies only to outward codes in the listed areas.
    applied_areas_regex = re.compile(r"^(BR|FY|HA|HD|HG|HR|HS|HX|JE|LD|SM|SR|WC|WN|ZE)")
    # Two letters + one digit; WC additionally needs a trailing letter.
    rule_regex = re.compile(r"^(?!WC)[A-Z]{2}[0-9]$|^WC[0-9][A-Z]$")
class DoubleDigitDistrict(PostcodeRule):
    """Areas with only double-digit districts: AB, LL, SO."""

    attr_applied = "outward"
    # Pre-filter: rule applies only to the AB, LL and SO areas.
    applied_areas_regex = re.compile(r"^(AB|LL|SO)")
    # Exactly two letters followed by exactly two digits.
    rule_regex = re.compile(r"^[A-Z]{2}[0-9]{2}$")
class ZeroOrTenDistrict(PostcodeRule):
    """Areas with a district '0' (zero): BL, BS, CM, CR, FY, HA, PR, SL, SS
    (BS is the only area with both a district 0 and a district 10)."""

    attr_applied = "outward"
    # Pre-filter: rule fires for any two-letter area with district 0 or 10.
    applied_areas_regex = re.compile(r"^[A-Z]{2}(0|10)$")
    # Only the listed areas may use district 0; only BS may use district 10.
    rule_regex = re.compile(r"^(BL|BS|CM|CR|FY|HA|PR|SL|SS)0$|^BS10$")
class CentralLondonDistrict(PostcodeRule):
    """Central London single-digit districts further divided by a letter
    after the digit: EC1-EC4, SW1, W1, WC1, WC2 and parts of E1 (E1W),
    N1 (N1C, N1P), NW1 (NW1W) and SE1 (SE1P)."""

    attr_applied = "outward"
    # Pre-filter: rule fires for the listed districts followed by a letter.
    applied_areas_regex = re.compile(r"^(EC[0-9]|E1|N1|NW1|SE1|SW1|W1|WC1|WC2)[A-Z]")
    # BUGFIX: the original pattern used '|' inside character classes
    # (e.g. [C|P]), where it is a literal member, so '|' itself was accepted
    # as a subdivision letter. Plain optional letters are used instead.
    rule_regex = re.compile(
        r"^EC[1-4][A-Z]?$|^E1W?$|^N1[CP]?$|^NW1W?$|^SE1P?$|^SW1[A-Z]?$|^W1[A-Z]?$|^WC[1-2][A-Z]?$"
    )
class FirstLetter(PostcodeRule):
    """The letters Q, V and X are not used in the first position."""

    attr_applied = "outward"
    # Pre-filter: only fires on codes starting with a forbidden letter...
    applied_areas_regex = re.compile(r"^(Q|V|X)")
    # ...so this pattern can never match once the rule fires (always raises).
    rule_regex = re.compile(r"^(?!Q|V|X).*")
class SecondLetter(PostcodeRule):
    """The letters I, J and Z are not used in the second position."""

    attr_applied = "outward"
    # Pre-filter: only fires when the second letter is forbidden...
    applied_areas_regex = re.compile(r"^[A-Z](I|J|Z)")
    # ...so this pattern can never match once the rule fires (always raises).
    rule_regex = re.compile(r"^[A-Z](?!I|J|Z).*")
class ThirdLetter(PostcodeRule):
    """The only letters to appear in the third position are A, B, C, D, E,
    F, G, H, J, K, P, S, T, U and W when the structure starts with A9A."""

    attr_applied = "outward"
    # Pre-filter: rule fires only for the A9A outward structure.
    applied_areas_regex = re.compile(r"^[A-Z][0-9][A-Z]$")
    # Third character restricted to the allowed letter set.
    rule_regex = re.compile(r"^[A-Z][0-9](A|B|C|D|E|F|G|H|J|K|P|S|T|U|W)$")
class FourthLetter(PostcodeRule):
    """The only letters to appear in the fourth position are A, B, E, H, M,
    N, P, R, V, W, X and Y when the structure starts with AA9A."""

    attr_applied = "outward"
    # Pre-filter: rule fires only for the AA9A outward structure.
    applied_areas_regex = re.compile(r"^[A-Z]{2}[0-9][A-Z]$")
    # Fourth character restricted to the allowed letter set.
    rule_regex = re.compile(r"^[A-Z]{2}[0-9](A|B|E|H|M|N|P|R|V|W|X|Y)$")
class LastTwoLetter(PostcodeRule):
    """The final two letters do not use C, I, K, M, O or V, so as not to
    resemble digits or each other when hand-written."""

    attr_applied = "inward"
    # Pre-filter: rule fires for any digit + two-letter inward code.
    applied_areas_regex = re.compile(r"^[0-9][A-Z]{2}$")
    # BUGFIX: the original character class was written [A|B|D|...], where
    # '|' is a literal member, so inputs such as '1||' matched the pattern.
    # Alphabet = A-Z minus C, I, K, M, O and V.
    rule_regex = re.compile(r"^[0-9][ABDEFGHJLNPQRSTUWXYZ]{2}$")
| true | true |
f73140d48b7cc619133309d7e0b2c6781efb1c0a | 9,038 | py | Python | twentyfortyeight/strategy/nn/data.py | ggould256/twentyfortyeight | 7d2b88023077ba4c64b65617d493039c0a9998c3 | [
"MIT"
] | null | null | null | twentyfortyeight/strategy/nn/data.py | ggould256/twentyfortyeight | 7d2b88023077ba4c64b65617d493039c0a9998c3 | [
"MIT"
] | null | null | null | twentyfortyeight/strategy/nn/data.py | ggould256/twentyfortyeight | 7d2b88023077ba4c64b65617d493039c0a9998c3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Classes and functions related to dataset generation for learning Q
functions. Datasets in this sense are mappings from board positions
(represented as flattened arrays of tile numbers) to score values.
"""
import argparse
import sys
import numpy as np
from game.common import *
from game.board import Board
from game.game import Game
# Width of one flattened board example (features per dataset row).
EXAMPLE_WIDTH = Board.vector_width()
MAX_BATCH_SIZE = 4096  # numpy arrays get slow to update beyond this size.
class Dataset(object):
    """A set of training data (held as matrices whose rows are examples) and a
    column vector of the example scores.."""

    def __init__(self):
        """Creates a new empty dataset."""
        self._num_examples = 0
        # Examples and scores are stored as parallel lists of numpy arrays
        # ("batches") so appends stay cheap; see MAX_BATCH_SIZE.
        self._example_batches = [np.zeros((0, EXAMPLE_WIDTH))]
        self._score_batches = [np.zeros((0, 1))]

    def add_game(self, player_strategy, rnd, starting_game_position=None):
        """Runs a game with the given strategy and randomness source, then
        enrolls the outcome in the dataset.

        If @p starting_position is a Game object, start from that position.

        Returns the number of examples (moves) added.
        """
        # NOTE(review): `states` starts with an all-zero row, so the value
        # returned (len(states)) is one greater than the number of OK moves
        # counted into self._num_examples -- confirm this is intended.
        states = np.zeros((1, EXAMPLE_WIDTH))
        num_moves = 0
        game = starting_game_position or Game(rnd=rnd)
        running = True
        while running:
            intermediate_board, turn_outcome = (
                game.do_turn_and_retrieve_intermediate(
                    player_strategy.get_move(game.board(), game.score())))
            running = (turn_outcome != GAMEOVER)
            num_moves += (turn_outcome != ILLEGAL)
            if turn_outcome == OK:
                states = np.append(states,
                                   Board.as_vector(intermediate_board),
                                   axis=0)
                self._num_examples += 1
        player_strategy.notify_outcome(game.board(), game.score())
        # NOTE(review): `game.score` is passed unbound (no parentheses);
        # harmless today because evaluate_states ignores the argument.
        scores = Dataset.evaluate_states(states, game.board(), game.score)
        assert(len(states) == len(scores))
        batch_size_so_far = self._example_batches[-1].shape[0]
        if len(states) + batch_size_so_far > MAX_BATCH_SIZE:
            # Current batch is full enough; start a fresh one.
            self._example_batches.append(np.zeros((0, EXAMPLE_WIDTH)))
            self._score_batches.append(np.zeros((0, 1)))
        self._example_batches[-1] = \
            np.append(self._example_batches[-1], states, axis=0)
        self._score_batches[-1] = np.append(self._score_batches[-1], scores)
        return len(states)

    @staticmethod
    def evaluate_states(states, end_board, end_score):
        """Associate a Q score with each state of the current game. There are
        many possible designs here, ranging from applying the ultimate score or
        highest attained tile to all of the states to scoring each state with
        the number of moves remaining in its game. The correct function is
        not obvious; the current implementation is moves-remaining."""
        del end_board, end_score
        # Scores N states as [N, N-1, ..., 1] -- moves remaining per state.
        return np.array(list(range(len(states), 0, -1)))

    def add_n_examples(self, strategy, rnd, n,
                       starting_positions_dataset=None):
        """Runs games and adds them to the dataset until at least @p n
        examples have been added. Returns the number of examples added.

        If @p starting_positions_dataset is set, games will be started from
        a randomly selected position from that dataset rather than from a
        blank board."""
        print("Adding", n, "examples to dataset.")
        added = 0
        while added < n:
            starting_game = None
            if starting_positions_dataset:
                random_position = starting_positions_dataset.nth_example(
                    rnd.randint(0,
                                starting_positions_dataset.num_examples() - 1))
                starting_game = Game(Board.from_vector(random_position))
                # Skip dead starting positions; resample another one.
                if not starting_game.board().can_move():
                    continue
            num_added = self.add_game(strategy, rnd, starting_game)
            # Progress message each time a 10000-example boundary is crossed.
            if (added // 10000) != ((num_added + added) // 10000):
                print("Added %d so far..." % (num_added + added))
            added += num_added
        return added

    def num_batches(self):
        # Number of internal storage batches (not the number of examples).
        return len(self._example_batches)

    def num_examples(self):
        # Count of examples recorded by add_game (see NOTE there).
        return self._num_examples

    def example_batches(self):
        # Raw list of example matrices; each row is one board vector.
        return self._example_batches

    def nth_example(self, n):
        """Returns the n-th example row across all batches, or None when n
        is out of range."""
        counter = n
        for batch in self._example_batches:
            size = batch.shape[0]
            if counter < size:
                return batch[counter, :]
            else:
                counter -= size
        return None

    def nth_score(self, n):
        """Returns the n-th score across all batches, or None when n is out
        of range."""
        counter = n
        for batch in self._score_batches:
            size = batch.shape[0]
            if counter < size:
                return batch[counter]
            else:
                counter -= size
        return None

    def score_batches(self):
        # Raw list of score arrays, parallel to example_batches().
        return self._score_batches

    def collapse(self):
        """Collapses all of the batches down to a single, very large batch."""
        self._score_batches = [np.concatenate(self._score_batches)]
        self._example_batches = [np.concatenate(self._example_batches)]

    def save(self, filename):
        """Writes the dataset to @p filename as an .npz archive containing
        one `examples_<i>`/`scores_<i>` entry pair per batch."""
        assert(filename.endswith(".npz"))
        num_batches = len(self._example_batches)
        examples_dict = {"examples_%s" % i: self._example_batches[i]
                         for i in range(num_batches)}
        scores_dict = {"scores_%s" % i: self._score_batches[i]
                       for i in range(num_batches)}
        unified_dict = {**examples_dict, **scores_dict}
        with open(filename, "wb") as f:
            np.savez(f, **unified_dict)

    @staticmethod
    def load(filename):
        """Reads back a Dataset previously written by save()."""
        assert(filename.endswith(".npz"))
        with open(filename, "rb") as f:
            npz_data = np.load(f)
            data = Dataset()
            data._example_batches = []
            data._score_batches = []
            # The archive holds an examples/scores pair per batch.
            num_batches = len(npz_data.files) // 2
            for i in range(num_batches):
                data._example_batches.append(
                    npz_data["examples_%s" % i])
                data._score_batches.append(
                    npz_data["scores_%s" % i])
            # Recompute the example count from the stored batch sizes.
            data._num_examples = sum(array.shape[0]
                                     for array in data._example_batches)
        return data
def main(argv):
    """Command-line entry point: play games with the chosen strategy,
    collect at least --num_examples examples, save them to --output_file
    as .npz, and verify the saved file loads back consistently.

    BUGFIX: the example count from the second add_n_examples call used to
    overwrite (instead of add to) the first call's count, so the final
    "Added N examples" message under-reported the total.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_examples', metavar='N', type=int,
                        help="Number of examples (at minimum) to generate")
    parser.add_argument('--output_file', metavar='FILENAME', type=str,
                        help="npz file into which to write example data")
    parser.add_argument('--strategy', metavar='FILE_OR_NAME', type=str,
                        help="name of strategy or filename of model",
                        default="random")
    parser.add_argument('--starting_positions', metavar='FILENAME', type=str,
                        default=None,
                        help=("If set, start some or all games from positions"
                              "drawn from this dataset"))
    parser.add_argument('--new_start_fraction', metavar='FRACTION', type=float,
                        default=1.,
                        help=("If --starting_positions is set, start this "
                              "fraction of games from a new game position"))
    args = parser.parse_args(argv[1:])

    # Imports deferred so that importing this module stays cheap.
    import random
    from strategy.basic import RandomStrategy, SpinnyStrategy
    from strategy.nn.nn_strategy import ModelStrategy
    if args.strategy == "spinny":
        strategy = SpinnyStrategy()
    elif args.strategy == "random":
        strategy = RandomStrategy()
    else:
        # Anything else is treated as a model filename.
        strategy = ModelStrategy(args.strategy)

    start_positions_dataset = None
    if args.starting_positions:
        start_positions_dataset = Dataset.load(args.starting_positions)

    dataset = Dataset()
    num_added = dataset.add_n_examples(
        strategy, random, args.num_examples * args.new_start_fraction)
    if args.new_start_fraction < 1:
        assert start_positions_dataset, \
            "--new_start_fraction requires --starting_positions"
        # BUGFIX: accumulate instead of overwriting the first call's count.
        num_added += dataset.add_n_examples(
            strategy, random, args.num_examples * (1 - args.new_start_fraction),
            starting_positions_dataset=start_positions_dataset)
    print("Added", num_added, "examples")
    print("saving...")
    dataset.save(args.output_file)
    print("...saved.")
    print("checking output file validity...")
    check_data = Dataset.load(args.output_file)
    assert dataset.num_batches() == check_data.num_batches(), \
        ("original batch number %s does not equal output batch number %s"
         % (dataset.num_batches(), check_data.num_batches()))
    check_data.collapse()
    print("...output is valid.")
# Script entry point: forward the full argv (main strips argv[0] itself).
if __name__ == '__main__':
    main(sys.argv)
| 39.99115 | 80 | 0.610423 |
import argparse
import sys
import numpy as np
from game.common import *
from game.board import Board
from game.game import Game
# Width of one flattened board example (features per dataset row).
EXAMPLE_WIDTH = Board.vector_width()
MAX_BATCH_SIZE = 4096  # numpy arrays get slow to update beyond this size
class Dataset(object):
def __init__(self):
self._num_examples = 0
self._example_batches = [np.zeros((0, EXAMPLE_WIDTH))]
self._score_batches = [np.zeros((0, 1))]
def add_game(self, player_strategy, rnd, starting_game_position=None):
states = np.zeros((1, EXAMPLE_WIDTH))
num_moves = 0
game = starting_game_position or Game(rnd=rnd)
running = True
while running:
intermediate_board, turn_outcome = (
game.do_turn_and_retrieve_intermediate(
player_strategy.get_move(game.board(), game.score())))
running = (turn_outcome != GAMEOVER)
num_moves += (turn_outcome != ILLEGAL)
if turn_outcome == OK:
states = np.append(states,
Board.as_vector(intermediate_board),
axis=0)
self._num_examples += 1
player_strategy.notify_outcome(game.board(), game.score())
scores = Dataset.evaluate_states(states, game.board(), game.score)
assert(len(states) == len(scores))
batch_size_so_far = self._example_batches[-1].shape[0]
if len(states) + batch_size_so_far > MAX_BATCH_SIZE:
self._example_batches.append(np.zeros((0, EXAMPLE_WIDTH)))
self._score_batches.append(np.zeros((0, 1)))
self._example_batches[-1] = \
np.append(self._example_batches[-1], states, axis=0)
self._score_batches[-1] = np.append(self._score_batches[-1], scores)
return len(states)
@staticmethod
def evaluate_states(states, end_board, end_score):
del end_board, end_score
return np.array(list(range(len(states), 0, -1)))
def add_n_examples(self, strategy, rnd, n,
starting_positions_dataset=None):
print("Adding", n, "examples to dataset.")
added = 0
while added < n:
starting_game = None
if starting_positions_dataset:
random_position = starting_positions_dataset.nth_example(
rnd.randint(0,
starting_positions_dataset.num_examples() - 1))
starting_game = Game(Board.from_vector(random_position))
if not starting_game.board().can_move():
continue
num_added = self.add_game(strategy, rnd, starting_game)
if (added // 10000) != ((num_added + added) // 10000):
print("Added %d so far..." % (num_added + added))
added += num_added
return added
def num_batches(self):
return len(self._example_batches)
def num_examples(self):
return self._num_examples
def example_batches(self):
    """Return the list of example arrays, one 2-D array per batch."""
    return self._example_batches
def nth_example(self, n):
    """Return the n-th example row across all batches, or None if out of range."""
    remaining = n
    for batch in self._example_batches:
        batch_len = batch.shape[0]
        if remaining >= batch_len:
            remaining -= batch_len
            continue
        return batch[remaining, :]
    return None
def nth_score(self, n):
    """Return the n-th score across all batches, or None if out of range."""
    remaining = n
    for batch in self._score_batches:
        batch_len = batch.shape[0]
        if remaining >= batch_len:
            remaining -= batch_len
            continue
        return batch[remaining]
    return None
def score_batches(self):
    """Return the list of score arrays, one array per batch."""
    return self._score_batches
def collapse(self):
    """Merge all storage batches into a single batch, in place."""
    merged_scores = np.concatenate(self._score_batches)
    merged_examples = np.concatenate(self._example_batches)
    self._score_batches = [merged_scores]
    self._example_batches = [merged_examples]
def save(self, filename):
    """Write all batches to *filename* as an .npz archive.

    Example batches are stored under keys 'examples_<i>' and score
    batches under 'scores_<i>', matching what load() expects.
    """
    assert filename.endswith(".npz")
    arrays = {}
    for index, batch in enumerate(self._example_batches):
        arrays["examples_%s" % index] = batch
    for index, batch in enumerate(self._score_batches):
        arrays["scores_%s" % index] = batch
    with open(filename, "wb") as f:
        np.savez(f, **arrays)
@staticmethod
def load(filename):
    """Load a Dataset previously written by save().

    Expects pairs of 'examples_<i>' / 'scores_<i>' arrays in the archive.
    """
    assert(filename.endswith(".npz"))
    with open(filename, "rb") as f:
        npz_data = np.load(f)
        data = Dataset()
        data._example_batches = []
        data._score_batches = []
        # Every batch contributes exactly one examples_<i> and one scores_<i>.
        num_batches = len(npz_data.files) // 2
        # Arrays must be materialized while the handle is open: np.load on a
        # file object returns a lazy NpzFile.
        for i in range(num_batches):
            data._example_batches.append(
                npz_data["examples_%s" % i])
            data._score_batches.append(
                npz_data["scores_%s" % i])
    data._num_examples = sum(array.shape[0]
                             for array in data._example_batches)
    return data
def main(argv):
    """Command-line entry point: generate game examples and save them as .npz.

    Flags:
        --num_examples: minimum number of examples to generate.
        --output_file: destination .npz file.
        --strategy: 'random', 'spinny', or a model filename.
        --starting_positions / --new_start_fraction: optionally seed a
            fraction of games from positions drawn from an existing dataset.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_examples', metavar='N', type=int,
                        help="Number of examples (at minimum) to generate")
    parser.add_argument('--output_file', metavar='FILENAME', type=str,
                        help="npz file into which to write example data")
    parser.add_argument('--strategy', metavar='FILE_OR_NAME', type=str,
                        help="name of strategy or filename of model",
                        default="random")
    parser.add_argument('--starting_positions', metavar='FILENAME', type=str,
                        default=None,
                        help=("If set, start some or all games from positions"
                              "drawn from this dataset"))
    parser.add_argument('--new_start_fraction', metavar='FRACTION', type=float,
                        default=1.,
                        help=("If --starting_positions is set, start this "
                              "fraction of games from a new game position"))
    args = parser.parse_args(argv[1:])

    # Imported lazily so merely importing this module stays cheap.
    import random
    from strategy.basic import RandomStrategy, SpinnyStrategy
    from strategy.nn.nn_strategy import ModelStrategy
    if args.strategy == "spinny":
        strategy = SpinnyStrategy()
    elif args.strategy == "random":
        strategy = RandomStrategy()
    else:
        strategy = ModelStrategy(args.strategy)

    start_positions_dataset = None
    if args.starting_positions:
        start_positions_dataset = Dataset.load(args.starting_positions)

    dataset = Dataset()
    # int(): num_examples * fraction is a float, but add_n_examples expects
    # a whole count.
    num_added = dataset.add_n_examples(
        strategy, random, int(args.num_examples * args.new_start_fraction))
    if args.new_start_fraction < 1:
        assert start_positions_dataset, \
            "--new_start_fraction requires --starting_positions"
        # Bug fix: accumulate instead of overwriting, so the final report
        # counts both phases rather than only the second one.
        num_added += dataset.add_n_examples(
            strategy, random,
            int(args.num_examples * (1 - args.new_start_fraction)),
            starting_positions_dataset=start_positions_dataset)
    print("Added", num_added, "examples")

    print("saving...")
    dataset.save(args.output_file)
    print("...saved.")

    print("checking output file validity...")
    check_data = Dataset.load(args.output_file)
    assert dataset.num_batches() == check_data.num_batches(), \
        ("original batch number %s does not equal output batch number %s"
         % (dataset.num_batches(), check_data.num_batches()))
    # collapse() exercises concatenation over the loaded arrays as a sanity check.
    check_data.collapse()
    print("...output is valid.")


if __name__ == '__main__':
    main(sys.argv)
| true | true |
f73142a10b36c905718b48cfe342e39d8d3f7c7d | 1,963 | py | Python | setup.py | Tibieson/pyueye | 67b316b69b49bc29151a11e61ce1ea70432513e9 | [
"BSD-3-Clause"
] | 4 | 2016-06-07T10:09:12.000Z | 2019-11-27T01:10:46.000Z | setup.py | Tibieson/pyueye | 67b316b69b49bc29151a11e61ce1ea70432513e9 | [
"BSD-3-Clause"
] | 1 | 2017-06-27T10:17:37.000Z | 2017-06-27T10:17:37.000Z | setup.py | Tibieson/pyueye | 67b316b69b49bc29151a11e61ce1ea70432513e9 | [
"BSD-3-Clause"
] | 3 | 2017-02-15T06:59:39.000Z | 2020-09-29T09:57:13.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys, os, stat, commands
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
try:
    from Cython.Distutils import build_ext
except ImportError:
    # Narrowed from a bare `except:` (which would also swallow SystemExit /
    # KeyboardInterrupt).  print() calls work on both Python 2 and 3; the
    # previous `print "..."` statements are a SyntaxError under Python 3.
    print("You don't seem to have Cython installed. Please get a")
    print("copy from www.cython.org and install it")
    sys.exit(1)
# scan the directory for extension files, converting
# them to extension names in dotted notation
def scandir(dir, files=None):
    """Recursively collect .pyx module names under *dir* in dotted notation.

    Note: the [2:-4] slice strips a leading './' and the '.pyx' suffix, so
    this is only correct when called with dir='.' (as this script does).

    Args:
        dir: directory to scan (expected to be ".").
        files: optional accumulator list; a fresh list is created per call.
            (The previous mutable default `files=[]` leaked results between
            top-level calls.)

    Returns:
        List of dotted module names, e.g. ["pkg.sub.mod"].
    """
    if files is None:
        files = []
    for file in os.listdir(dir):
        path = os.path.join(dir, file)
        if os.path.isfile(path) and path.endswith(".pyx"):
            files.append(path.replace(os.path.sep, ".")[2:-4])
        elif os.path.isdir(path):
            scandir(path, files)
    return files
# generate an Extension object from its dotted name
def makeExtension(extName):
    """Build a Cython Extension object from a dotted module name.

    Includes the matching .pxd file in the sources when one exists.
    """
    base = extName.replace(".", os.path.sep)
    pyx_path = base + ".pyx"
    pxd_path = base + ".pxd"
    sources = [pyx_path, pxd_path] if os.path.isfile(pxd_path) else [pyx_path]
    return Extension(
        extName,
        sources,
        include_dirs=["."],  # adding the '.' to include_dirs is CRUCIAL!!
        extra_compile_args=["-D__LINUX__"],
        libraries=["ueye_api"],
    )
extNames = scandir(".")
# and build up the set of Extension objects
extensions = [makeExtension(name) for name in extNames]
setup(
version ='??',
name = "pyueye",
author= 'Ricardo Amezquita Orozco',
author_email='ramezquitao@cihologramas.com',
description='Python binding for ueye camera drivers',
license='BSD',
url='https://trac.cihologramas.com/trac/',
ext_modules=cythonize(extensions),
packages=["ueye","wxueye"],
scripts=['wxVidCap.py'],
cmdclass = {'build_ext': build_ext},
)
| 29.742424 | 94 | 0.629139 |
import sys, os, stat, commands
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
try:
    from Cython.Distutils import build_ext
except ImportError:
    # Narrowed from a bare `except:`; print() form is valid on Python 2 and 3,
    # whereas the previous `print "..."` statements break under Python 3.
    print("You don't seem to have Cython installed. Please get a")
    print("copy from www.cython.org and install it")
    sys.exit(1)
# scan the directory for extension files, converting
# them to extension names in dotted notation
def scandir(dir, files=None):
    """Recursively collect .pyx module names under *dir* in dotted notation.

    Only correct when called with dir='.' — the [2:-4] slice strips the
    leading './' and trailing '.pyx'.  `files` defaults to None instead of a
    mutable default list, which would leak results between calls.
    """
    if files is None:
        files = []
    for file in os.listdir(dir):
        path = os.path.join(dir, file)
        if os.path.isfile(path) and path.endswith(".pyx"):
            files.append(path.replace(os.path.sep, ".")[2:-4])
        elif os.path.isdir(path):
            scandir(path, files)
    return files
# generate an Extension object from its dotted name
def makeExtension(extName):
    """Build a Cython Extension object from a dotted module name.

    Includes the matching .pxd file in the sources when one exists.
    """
    base = extName.replace(".", os.path.sep)
    pyx_path = base + ".pyx"
    pxd_path = base + ".pxd"
    sources = [pyx_path, pxd_path] if os.path.isfile(pxd_path) else [pyx_path]
    return Extension(
        extName,
        sources,
        include_dirs=["."],  # adding the '.' to include_dirs is CRUCIAL!!
        extra_compile_args=["-D__LINUX__"],
        libraries=["ueye_api"],
    )
extNames = scandir(".")
# and build up the set of Extension objects
extensions = [makeExtension(name) for name in extNames]
setup(
version ='??',
name = "pyueye",
author= 'Ricardo Amezquita Orozco',
author_email='ramezquitao@cihologramas.com',
description='Python binding for ueye camera drivers',
license='BSD',
url='https://trac.cihologramas.com/trac/',
ext_modules=cythonize(extensions),
packages=["ueye","wxueye"],
scripts=['wxVidCap.py'],
cmdclass = {'build_ext': build_ext},
)
| false | true |
f73142a29e58f8c444562a3781b8fbaa6e06ccc2 | 2,085 | py | Python | scout/server/blueprints/institutes/controllers.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | scout/server/blueprints/institutes/controllers.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | scout/server/blueprints/institutes/controllers.py | gmc-norr/scout | ea8eaaa079c63e4033af6216ec08da4a314f9b5c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
LOG = logging.getLogger(__name__)
def institute(store, institute_id):
    """Assemble display data for one institute.

    Args:
        store(adapter.MongoAdapter)
        institute_id(str)

    Returns:
        data(dict): institute document plus its users
    """
    institute_obj = store.institute(institute_id)
    institute_users = store.users(institute_id)
    return {"institute": institute_obj, "users": institute_users}
def update_institute_settings(store, institute_obj, form):
    """Update an institute document with values submitted via the settings form.

    Args:
        store(adapter.MongoAdapter)
        institute_obj(dict): current institute document (provides _id)
        form(dict): werkzeug-style form with getlist()

    Returns:
        updated_institute(dict)
    """
    sanger_recipients = [email.strip() for email in form.getlist("sanger_emails")]
    sharing_institutes = list(form.getlist("institutes"))
    phenotype_groups = []
    group_abbreviations = []
    # Each entry looks like "<hpo term> , <description> ( <abbreviation> )".
    for pheno_group in form.getlist("pheno_groups"):
        phenotype_groups.append(pheno_group.split(" ,")[0])
        group_abbreviations.append(
            pheno_group[pheno_group.find("( ") + 2 : pheno_group.find(" )")]
        )
    # Optional extra group entered as a free HPO term + abbreviation pair.
    if form.get("hpo_term") and form.get("pheno_abbrev"):
        phenotype_groups.append(form["hpo_term"].split(" |")[0])
        group_abbreviations.append(form["pheno_abbrev"])
    cohorts = [cohort.strip() for cohort in form.getlist("cohorts")]
    return store.update_institute(
        internal_id=institute_obj["_id"],
        sanger_recipients=sanger_recipients,
        coverage_cutoff=int(form.get("coverage_cutoff")),
        frequency_cutoff=float(form.get("frequency_cutoff")),
        display_name=form.get("display_name"),
        phenotype_groups=phenotype_groups,
        group_abbreviations=group_abbreviations,
        add_groups=False,
        sharing_institutes=sharing_institutes,
        cohorts=cohorts,
    )
| 27.8 | 76 | 0.664748 |
import logging
LOG = logging.getLogger(__name__)
def institute(store, institute_id):
    """Assemble display data for one institute.

    Args:
        store(adapter.MongoAdapter)
        institute_id(str)

    Returns:
        data(dict): institute document plus its users
    """
    institute_obj = store.institute(institute_id)
    institute_users = store.users(institute_id)
    return {"institute": institute_obj, "users": institute_users}
def update_institute_settings(store, institute_obj, form):
    """Update an institute document with values submitted via the settings form.

    Args:
        store(adapter.MongoAdapter)
        institute_obj(dict): current institute document (provides _id)
        form(dict): werkzeug-style form with getlist()

    Returns:
        updated_institute(dict)
    """
    sanger_recipients = [email.strip() for email in form.getlist("sanger_emails")]
    sharing_institutes = list(form.getlist("institutes"))
    phenotype_groups = []
    group_abbreviations = []
    # Each entry looks like "<hpo term> , <description> ( <abbreviation> )".
    for pheno_group in form.getlist("pheno_groups"):
        phenotype_groups.append(pheno_group.split(" ,")[0])
        group_abbreviations.append(
            pheno_group[pheno_group.find("( ") + 2 : pheno_group.find(" )")]
        )
    # Optional extra group entered as a free HPO term + abbreviation pair.
    if form.get("hpo_term") and form.get("pheno_abbrev"):
        phenotype_groups.append(form["hpo_term"].split(" |")[0])
        group_abbreviations.append(form["pheno_abbrev"])
    cohorts = [cohort.strip() for cohort in form.getlist("cohorts")]
    return store.update_institute(
        internal_id=institute_obj["_id"],
        sanger_recipients=sanger_recipients,
        coverage_cutoff=int(form.get("coverage_cutoff")),
        frequency_cutoff=float(form.get("frequency_cutoff")),
        display_name=form.get("display_name"),
        phenotype_groups=phenotype_groups,
        group_abbreviations=group_abbreviations,
        add_groups=False,
        sharing_institutes=sharing_institutes,
        cohorts=cohorts,
    )
return updated_institute
| true | true |
f731432c913f3ccfd7e302c02503a7537510ff9a | 2,720 | py | Python | pipenv/vendor/pythonfinder/models/windows.py | Enzime/pipenv | d4f710be4a39e09a82a5133b7b3a277ee9bfb13a | [
"MIT"
] | null | null | null | pipenv/vendor/pythonfinder/models/windows.py | Enzime/pipenv | d4f710be4a39e09a82a5133b7b3a277ee9bfb13a | [
"MIT"
] | null | null | null | pipenv/vendor/pythonfinder/models/windows.py | Enzime/pipenv | d4f710be4a39e09a82a5133b7b3a277ee9bfb13a | [
"MIT"
] | 1 | 2021-07-03T03:30:45.000Z | 2021-07-03T03:30:45.000Z | # -*- coding=utf-8 -*-
from __future__ import print_function, absolute_import
import attr
import operator
from collections import defaultdict
from . import BaseFinder
from .path import PathEntry
from .python import PythonVersion, VersionMap
from ..exceptions import InvalidPythonVersion
from ..utils import ensure_path
@attr.s
class WindowsFinder(BaseFinder):
    """Finder that locates Python installations via the Windows registry.

    Uses the vendored pep514tools to enumerate PEP 514 registry entries.
    """

    # attrs-managed fields; `versions` and `pythons` are populated by the
    # @<field>.default methods below, which run at instantiation time.
    paths = attr.ib(default=attr.Factory(list))
    version_list = attr.ib(default=attr.Factory(list))
    versions = attr.ib()
    pythons = attr.ib()

    def find_all_python_versions(
        self, major=None, minor=None, patch=None, pre=None, dev=None, arch=None
    ):
        """Return PathEntry objects for every matching version, newest first."""
        version_matcher = operator.methodcaller(
            "matches",
            major=major,
            minor=minor,
            patch=patch,
            pre=pre,
            dev=dev,
            arch=arch,
        )
        # The outer filter(None, ...) additionally drops falsy candidates.
        py_filter = filter(
            None, filter(lambda c: version_matcher(c), self.version_list)
        )
        version_sort = operator.attrgetter("version_sort")
        return [c.comes_from for c in sorted(py_filter, key=version_sort, reverse=True)]

    def find_python_version(
        self, major=None, minor=None, patch=None, pre=None, dev=None, arch=None
    ):
        """Return the best matching version's PathEntry, or None if none match."""
        return next(
            (
                v
                for v in self.find_all_python_versions(
                    major=major, minor=minor, patch=patch, pre=pre, dev=dev, arch=arch
                )
            ),
            None,
        )

    @versions.default
    def get_versions(self):
        """Scan PEP 514 registry entries and build the version-tuple -> PathEntry map.

        Side effects: also fills self.version_list and self.paths.
        """
        versions = defaultdict(PathEntry)
        from pythonfinder._vendor.pep514tools import environment as pep514env
        env_versions = pep514env.findall()
        path = None
        for version_object in env_versions:
            # install_path's unnamed (default) registry value is read via the
            # empty attribute name.
            path = ensure_path(version_object.info.install_path.__getattr__(""))
            try:
                py_version = PythonVersion.from_windows_launcher(version_object)
            except InvalidPythonVersion:
                # Skip registry entries that don't describe a usable version.
                continue
            self.version_list.append(py_version)
            base_dir = PathEntry.create(
                path,
                is_root=True,
                only_python=True,
                pythons={py_version.comes_from.path: py_version},
            )
            versions[py_version.version_tuple[:5]] = base_dir
            self.paths.append(base_dir)
        return versions

    @pythons.default
    def get_pythons(self):
        """Map each interpreter path (POSIX form) to its PathEntry."""
        pythons = defaultdict()
        for version in self.version_list:
            _path = ensure_path(version.comes_from.path)
            pythons[_path.as_posix()] = version.comes_from
        return pythons

    @classmethod
    def create(cls):
        """Construct a finder with default (registry-derived) contents."""
        return cls()
| 31.627907 | 88 | 0.608088 |
from __future__ import print_function, absolute_import
import attr
import operator
from collections import defaultdict
from . import BaseFinder
from .path import PathEntry
from .python import PythonVersion, VersionMap
from ..exceptions import InvalidPythonVersion
from ..utils import ensure_path
@attr.s
class WindowsFinder(BaseFinder):
    """Finder that locates Python installations via the Windows registry."""

    # `versions` and `pythons` are populated by the @<field>.default methods.
    paths = attr.ib(default=attr.Factory(list))
    version_list = attr.ib(default=attr.Factory(list))
    versions = attr.ib()
    pythons = attr.ib()

    def find_all_python_versions(
        self, major=None, minor=None, patch=None, pre=None, dev=None, arch=None
    ):
        """Return PathEntry objects for every matching version, newest first."""
        version_matcher = operator.methodcaller(
            "matches",
            major=major,
            minor=minor,
            patch=patch,
            pre=pre,
            dev=dev,
            arch=arch,
        )
        py_filter = filter(
            None, filter(lambda c: version_matcher(c), self.version_list)
        )
        version_sort = operator.attrgetter("version_sort")
        return [c.comes_from for c in sorted(py_filter, key=version_sort, reverse=True)]

    def find_python_version(
        self, major=None, minor=None, patch=None, pre=None, dev=None, arch=None
    ):
        """Return the best matching version's PathEntry, or None if none match."""
        return next(
            (
                v
                for v in self.find_all_python_versions(
                    major=major, minor=minor, patch=patch, pre=pre, dev=dev, arch=arch
                )
            ),
            None,
        )

    @versions.default
    def get_versions(self):
        """Scan PEP 514 registry entries; also fills version_list and paths."""
        versions = defaultdict(PathEntry)
        from pythonfinder._vendor.pep514tools import environment as pep514env
        env_versions = pep514env.findall()
        path = None
        for version_object in env_versions:
            # Unnamed (default) registry value accessed via the empty name.
            path = ensure_path(version_object.info.install_path.__getattr__(""))
            try:
                py_version = PythonVersion.from_windows_launcher(version_object)
            except InvalidPythonVersion:
                continue
            self.version_list.append(py_version)
            base_dir = PathEntry.create(
                path,
                is_root=True,
                only_python=True,
                pythons={py_version.comes_from.path: py_version},
            )
            versions[py_version.version_tuple[:5]] = base_dir
            self.paths.append(base_dir)
        return versions

    @pythons.default
    def get_pythons(self):
        """Map each interpreter path (POSIX form) to its PathEntry."""
        pythons = defaultdict()
        for version in self.version_list:
            _path = ensure_path(version.comes_from.path)
            pythons[_path.as_posix()] = version.comes_from
        return pythons

    @classmethod
    def create(cls):
        """Construct a finder with default (registry-derived) contents."""
        return cls()
| true | true |
f73143b9c73994a32651383e17bb04174dee2b85 | 915 | py | Python | python/test/test_volume.py | adriangonz/seldon-deploy-sdk | c5504838630a87053387cec57ec2e1e7251971e2 | [
"Apache-2.0"
] | 6 | 2021-02-18T14:37:54.000Z | 2022-01-13T13:27:43.000Z | python/test/test_volume.py | adriangonz/seldon-deploy-sdk | c5504838630a87053387cec57ec2e1e7251971e2 | [
"Apache-2.0"
] | 14 | 2021-01-04T16:32:03.000Z | 2021-12-13T17:53:59.000Z | python/test/test_volume.py | adriangonz/seldon-deploy-sdk | c5504838630a87053387cec57ec2e1e7251971e2 | [
"Apache-2.0"
] | 7 | 2021-03-17T09:05:55.000Z | 2022-01-05T10:39:56.000Z | # coding: utf-8
"""
Seldon Deploy API
API to interact and manage the lifecycle of your machine learning models deployed through Seldon Deploy. # noqa: E501
OpenAPI spec version: v1alpha1
Contact: hello@seldon.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import seldon_deploy_sdk
from seldon_deploy_sdk.models.volume import Volume # noqa: E501
from seldon_deploy_sdk.rest import ApiException
class TestVolume(unittest.TestCase):
    """Unit-test stubs for the Volume model (generated scaffold)."""

    def setUp(self):
        """No fixtures required for the stub."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testVolume(self):
        """Test Volume"""
        # FIXME: construct object with mandatory attributes with example values
        # model = seldon_deploy_sdk.models.volume.Volume()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this stub suite directly: python test_volume.py
    unittest.main()
| 22.317073 | 122 | 0.700546 |
from __future__ import absolute_import
import unittest
import seldon_deploy_sdk
from seldon_deploy_sdk.models.volume import Volume
from seldon_deploy_sdk.rest import ApiException
class TestVolume(unittest.TestCase):
    """Unit-test stubs for the Volume model."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testVolume(self):
        # Bug fix: comment-stripping left a bare `s` here, which raised
        # NameError when the test ran.  Restored to an explicit stub body.
        pass
if __name__ == '__main__':
    # Allow running this stub suite directly.
    unittest.main()
| true | true |
f73143ec8bc29c71a9b4d763b26b580bba6673cb | 3,254 | py | Python | logging/cloud-client/snippets.py | alexhaines123/googlecloudsqlexamples | 06d9254ec77955c02f18cd79a57cdfbd64dbf8ea | [
"Apache-2.0"
] | 2 | 2017-09-23T04:23:46.000Z | 2021-06-11T01:23:06.000Z | logging/cloud-client/snippets.py | ryanmats/python-docs-samples | 183a6186cd059c7ba24ef324614bc5fee08bff08 | [
"Apache-2.0"
] | null | null | null | logging/cloud-client/snippets.py | ryanmats/python-docs-samples | 183a6186cd059c7ba24ef324614bc5fee08bff08 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates how to perform basic operations on logs and
log entries with Stackdriver Logging.
For more information, see the README.md under /logging and the
documentation at https://cloud.google.com/logging/docs.
"""
import argparse
from gcloud import logging
def write_entry(logger_name):
    """Write a few demonstration entries to the named logger."""
    client = logging.Client()
    # This log can be found in the Cloud Logging console under 'Custom Logs'.
    target_logger = client.logger(logger_name)
    # Plain text entry (default severity).
    target_logger.log_text('Hello, world!')
    # Text entry with an explicit severity.
    target_logger.log_text('Goodbye, world!', severity='ERROR')
    # Structured entry: any JSON-serializable mapping is accepted.
    target_logger.log_struct({
        'name': 'King Arthur',
        'quest': 'Find the Holy Grail',
        'favorite_color': 'Blue'
    })
    print('Wrote logs to {}.'.format(target_logger.name))
def list_entries(logger_name):
    """Print the most recent entries for the named logger."""
    client = logging.Client()
    target_logger = client.logger(logger_name)
    print('Listing entries for logger {}:'.format(target_logger.name))
    entries = []
    page_token = None
    # Follow pagination until the service stops returning a token.
    while True:
        batch, page_token = target_logger.list_entries(page_token=page_token)
        entries.extend(batch)
        if not page_token:
            break
    for entry in entries:
        print('* {}: {}'.format(entry.timestamp.isoformat(), entry.payload))
def delete_logger(logger_name):
    """Delete a logger and all of its entries.

    Note that a deletion can take several minutes to take effect.
    """
    client = logging.Client()
    target_logger = client.logger(logger_name)
    target_logger.delete()
    print('Deleted all logging entries for {}'.format(target_logger.name))
if __name__ == '__main__':
    # CLI: positional logger name plus a subcommand (list / write / delete).
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        'logger_name', help='Logger name', default='example_log')
    subparsers = parser.add_subparsers(dest='command')
    subparsers.add_parser('list', help=list_entries.__doc__)
    subparsers.add_parser('write', help=write_entry.__doc__)
    subparsers.add_parser('delete', help=delete_logger.__doc__)

    args = parser.parse_args()

    if args.command == 'list':
        list_entries(args.logger_name)
    elif args.command == 'write':
        write_entry(args.logger_name)
    elif args.command == 'delete':
        delete_logger(args.logger_name)
| 30.411215 | 77 | 0.69791 |
import argparse
from gcloud import logging
def write_entry(logger_name):
    """Write a few demonstration entries to the named logger."""
    client = logging.Client()
    target_logger = client.logger(logger_name)
    # Plain text entry (default severity).
    target_logger.log_text('Hello, world!')
    # Text entry with an explicit severity.
    target_logger.log_text('Goodbye, world!', severity='ERROR')
    # Structured entry: any JSON-serializable mapping is accepted.
    target_logger.log_struct({
        'name': 'King Arthur',
        'quest': 'Find the Holy Grail',
        'favorite_color': 'Blue'
    })
    print('Wrote logs to {}.'.format(target_logger.name))
def list_entries(logger_name):
    """Print the most recent entries for the named logger."""
    client = logging.Client()
    target_logger = client.logger(logger_name)
    print('Listing entries for logger {}:'.format(target_logger.name))
    entries = []
    page_token = None
    # Follow pagination until the service stops returning a token.
    while True:
        batch, page_token = target_logger.list_entries(page_token=page_token)
        entries.extend(batch)
        if not page_token:
            break
    for entry in entries:
        print('* {}: {}'.format(entry.timestamp.isoformat(), entry.payload))
def delete_logger(logger_name):
    """Delete a logger and all of its entries.

    Note that a deletion can take several minutes to take effect.
    """
    client = logging.Client()
    target_logger = client.logger(logger_name)
    target_logger.delete()
    print('Deleted all logging entries for {}'.format(target_logger.name))
if __name__ == '__main__':
    # CLI: positional logger name plus a subcommand (list / write / delete).
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        'logger_name', help='Logger name', default='example_log')
    subparsers = parser.add_subparsers(dest='command')
    subparsers.add_parser('list', help=list_entries.__doc__)
    subparsers.add_parser('write', help=write_entry.__doc__)
    subparsers.add_parser('delete', help=delete_logger.__doc__)

    args = parser.parse_args()

    if args.command == 'list':
        list_entries(args.logger_name)
    elif args.command == 'write':
        write_entry(args.logger_name)
    elif args.command == 'delete':
        delete_logger(args.logger_name)
| true | true |
f73146cc31c2e9c1cdcb0e154acdd7af261b22a1 | 10,702 | py | Python | backbone/model_audio.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | [
"MIT"
] | 10 | 2021-06-15T17:48:34.000Z | 2022-02-23T18:34:28.000Z | backbone/model_audio.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | [
"MIT"
] | 1 | 2021-11-12T15:12:38.000Z | 2021-11-12T19:38:00.000Z | backbone/model_audio.py | rtu715/NAS-Bench-360 | d075006848c664371855c34082b0a00cda62be67 | [
"MIT"
] | 1 | 2021-11-15T04:07:17.000Z | 2021-11-15T04:07:17.000Z | '''
Determined model def example:
https://github.com/determined-ai/determined/tree/master/examples/computer_vision/cifar10_pytorch
'''
import tempfile
from typing import Any, Dict, Sequence, Tuple, Union, cast
from functools import partial
import os
import boto3
import numpy as np
from sklearn.metrics import average_precision_score
import torch
from torch import nn
from determined.pytorch import DataLoader, PyTorchTrial, PyTorchTrialContext, LRScheduler
from backbone_pt import Backbone_Pt, Backbone_Audio
import utils_pt
from data_utils.load_data import load_data
from data_utils.download_data import download_from_s3
from data_utils.audio_dataset import *
from data_utils.audio_dataset import _collate_fn, _collate_fn_eval
# Constants about the dataset here (need to modify)
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
def accuracy_rate(predictions: torch.Tensor, labels: torch.Tensor) -> float:
    """Return the accuracy rate based on dense predictions and sparse labels."""
    assert len(predictions) == len(labels), "Predictions and labels must have the same length."
    assert len(labels.shape) == 1, "Labels must be a column vector."
    predicted_classes = predictions.argmax(1)
    num_correct = (predicted_classes == labels.to(torch.long)).sum()
    return float(num_correct) / predictions.shape[0]  # type: ignore
class AttrDict(dict):
    """Dictionary whose keys are also readable/writable as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Aliasing __dict__ to the mapping itself makes d.key == d['key'].
        self.__dict__ = self
class BackboneTrial(PyTorchTrial):
    """Determined trial that trains a Wide-ResNet-style backbone.

    Supports several tasks (sEMG, ninapro, cifar*, smnist, audio); the audio
    task is multi-label and uses BCE + mAP/mAUC metrics, all others use
    cross-entropy.
    """

    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        # Also downloads the dataset and populates train/val(/test) splits.
        self.download_directory = self.download_data_from_s3()
        #self.results = {"loss": float("inf"), "top1_accuracy": 0, "top5_accuracy": 0, "test_loss": float("inf"),
        #                "test_top1_accuracy": 0, "test_top5_accuracy": 0}
        # task -> (num_classes, input_channels)
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3),
                          'smnist': (10, 1), 'cifar100': (100, 3), 'scifar100': (100, 3),
                          'audio': (200, 1)}
        n_classes, in_channels = dataset_hypers[self.hparams.task]
        print('task: ', self.hparams.task, 'in_channels: ', in_channels, 'classes: ', n_classes)
        # Changing our backbone: hparam "backbone" is "depth,width".
        depth = list(map(int, self.hparams.backbone.split(',')))[0]
        width = list(map(int, self.hparams.backbone.split(',')))[1]
        #for audio, use multilabel loss
        if self.hparams.task == 'audio':
            # where is the weights file?
            self.criterion = nn.BCEWithLogitsLoss().cuda()
            self.backbone = Backbone_Audio(depth, n_classes, width,
                                           dropRate=self.hparams.droprate, in_channels=in_channels)
        else:
            self.criterion = nn.CrossEntropyLoss().cuda()
            self.backbone = Backbone_Pt(
                depth,
                n_classes,
                width,
                dropRate=self.hparams.droprate,
                in_channels=in_channels,
            )
        total_params = sum(p.numel() for p in self.backbone.parameters() if p.requires_grad) / 1e6
        print('Parameter size in MB(backbone): ', total_params)
        self.model = self.context.wrap_model(self.backbone)
        # Counter used to throttle test-set evaluation (audio task only).
        self.last_eval = 0
        '''
        Definition of optimizer
        '''
        nesterov = self.hparams.nesterov if self.hparams.momentum else False
        self.opt = self.context.wrap_optimizer(torch.optim.SGD(
            self.model.parameters(),
            lr=self.hparams.learning_rate,
            momentum=self.hparams.momentum,
            weight_decay=self.hparams.weight_decay,
            nesterov=nesterov)
        )
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=torch.optim.lr_scheduler.LambdaLR(
                self.opt,
                lr_lambda=self.weight_sched,
                last_epoch=self.hparams.start_epoch - 1
            ),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )

    def weight_sched(self, epoch) -> Any:
        """LR multiplier: decay by 0.2 at 30/60/80% of training (or the
        classic 60/120/160 milestones for 200-epoch runs)."""
        if self.hparams.epochs != 200:
            return 0.2 ** (epoch >= int(0.3 * self.hparams.epochs)) * 0.2 ** (epoch > int(0.6 * self.hparams.epochs)) * 0.2 ** (epoch > int(0.8 * self.hparams.epochs))
        #print('using original weight schedule')
        return 0.2 ** (epoch >= 60) * 0.2 ** (epoch >= 120) * 0.2 ** (epoch >= 160)

    def download_data_from_s3(self):
        '''Download data from s3 to store in temp directory'''
        s3_bucket = self.context.get_data_config()["bucket"]
        #download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
        #download_directory = "/tmp/data"
        download_directory = os.getcwd()
        s3 = boto3.client("s3")
        #os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        # With hparams.train the loader returns (train, val, test); otherwise
        # the held-out split is used as the validation set.
        if self.hparams.train:
            self.train_data, self.val_data, self.test_data = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
            self.build_test_data_loader(download_directory)
        else:
            self.train_data, _, self.val_data = load_data(self.hparams.task, download_directory, False, self.hparams.permute)
        return download_directory

    def build_training_data_loader(self) -> DataLoader:
        """Return the Determined DataLoader over the training split."""
        trainset = self.train_data
        print(len(trainset))
        train_loader = DataLoader(trainset, num_workers=4, batch_size=self.context.get_per_slot_batch_size(),
                                  shuffle=True, sampler=None, collate_fn=_collate_fn,
                                  pin_memory=False, drop_last=True)
        print(len(train_loader))
        return train_loader

    def build_validation_data_loader(self) -> DataLoader:
        """Return the validation DataLoader (batch_size=1; eval collate)."""
        valset = self.val_data
        print(len(valset))
        return DataLoader(valset, sampler=None, num_workers=4,
                          collate_fn=_collate_fn_eval,
                          shuffle=False, batch_size=1,
                          pin_memory=False
                          )

    def build_test_data_loader(self, download_directory):
        """No-op placeholder; the test set is iterated directly in
        evaluate_audio_testset rather than through a DataLoader."""
        testset = self.test_data
        print(len(testset))
        #self.test_loader = torch.utils.data.DataLoader(testset, batch_size=self.context.get_per_slot_batch_size(),
        #                                               shuffle=False, num_workers=2)
        return

    '''
    Train and Evaluate Methods
    '''
    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int
                    ) -> Dict[str, torch.Tensor]:
        """One optimization step on a single (x, _, y) batch."""
        x_train, _, y_train = batch
        self.model.train()
        output = self.model(x_train)
        loss = self.criterion(output, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(self.opt)
        return {
            'loss': loss,
        }

    def evaluate_full_dataset(
        self, data_loader: torch.utils.data.DataLoader,
    ) -> Dict[str, Any]:
        """Evaluate on the validation loader; reports loss and macro mAP.

        Logits from the per-sample clip batch are averaged (mean over dim 0)
        before the loss — assumes each loader item is one sample's clips;
        TODO confirm against _collate_fn_eval.
        """
        # Inference-only runs on the audio task go straight to the test-style
        # evaluation of the held-out split.
        if not self.hparams.train and self.hparams.task == 'audio':
            return self.evaluate_audio_testset(self.val_data)
        loss_avg = utils_pt.AverageMeter()
        val_predictions = []
        val_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                logits = logits.mean(0).unsqueeze(0)
                loss = self.criterion(logits, target)
                #top1, top5 = utils_pt.accuracy(logits, target, topk=(1, 5))
                #acc_top1.update(top1.item(), n)
                #acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
                logits_sigmoid = torch.sigmoid(logits)
                val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
                val_gts.append(target.detach().cpu().numpy()[0])
        val_preds = np.asarray(val_predictions).astype('float32')
        val_gts = np.asarray(val_gts).astype('int32')
        map_value = average_precision_score(val_gts, val_preds, average="macro")
        results = {
            "loss": loss_avg.avg,
            "val_mAP": map_value,
        }
        '''
        if self.hparams.train:
            test_acc_top1 = utils_pt.AverageMeter()
            test_acc_top5 = utils_pt.AverageMeter()
            test_loss = utils_pt.AverageMeter()
            with torch.no_grad():
                for batch in self.test_loader:
                    batch = self.context.to_device(batch)
                    input, target = batch
                    n = input.size(0)
                    logits = self.model(input)
                    loss = self.criterion(logits, target)
                    top1, top5 = utils_pt.accuracy(logits, target, topk=(1, 5))
                    test_acc_top1.update(top1.item(), n)
                    test_acc_top5.update(top5.item(), n)
                    test_loss.update(loss, n)
            results2 = {
                "test_loss": test_loss.avg,
                "test_top1_accuracy": test_acc_top1.avg,
                "test_top5_accuracy": test_acc_top5.avg,
            }
            results.update(results2)
        '''
        # Audio: run the (expensive) test-set evaluation only every 20th call.
        if self.hparams.task == 'audio' and self.last_eval % 20 == 0:
            results.update(self.evaluate_audio_testset(self.test_data))
        self.last_eval += 1
        return results

    def evaluate_audio_testset(self, testset) -> Dict[str, torch.Tensor]:
        """Compute mAP/mAUC over an audio dataset, one sample at a time."""
        cnt = 0
        test_predictions = []
        test_gts = []
        for ix in range(testset.len):
            with torch.no_grad():
                batch = testset[ix]
                x, y = batch
                x = x.cuda()
                y_pred = self.model(x)
                # Average clip-level logits into one sample-level prediction.
                y_pred = y_pred.mean(0).unsqueeze(0)
            sigmoid_preds = torch.sigmoid(y_pred)
            test_predictions.append(sigmoid_preds.detach().cpu().numpy()[0])
            test_gts.append(y.detach().cpu().numpy()[0])  # drop batch axis
        test_predictions = np.asarray(test_predictions).astype('float32')
        test_gts = np.asarray(test_gts).astype('int32')
        stats = calculate_stats(test_predictions, test_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {
            "test_mAUC": mAUC,
            "test_mAP": mAP,
        }
        return results
| 37.289199 | 167 | 0.594749 | import tempfile
from typing import Any, Dict, Sequence, Tuple, Union, cast
from functools import partial
import os
import boto3
import numpy as np
from sklearn.metrics import average_precision_score
import torch
from torch import nn
from determined.pytorch import DataLoader, PyTorchTrial, PyTorchTrialContext, LRScheduler
from backbone_pt import Backbone_Pt, Backbone_Audio
import utils_pt
from data_utils.load_data import load_data
from data_utils.download_data import download_from_s3
from data_utils.audio_dataset import *
from data_utils.audio_dataset import _collate_fn, _collate_fn_eval
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
def accuracy_rate(predictions: torch.Tensor, labels: torch.Tensor) -> float:
    """Return the fraction of rows whose argmax matches the label.

    :param predictions: (N, C) score matrix.
    :param labels: (N,) vector of integer class indices.
    """
    assert len(predictions) == len(labels), "Predictions and labels must have the same length."
    assert len(labels.shape) == 1, "Labels must be a column vector."
    hits = predictions.argmax(1).eq(labels.to(torch.long)).sum()
    return float(hits) / predictions.shape[0]
class AttrDict(dict):
    """dict whose entries are also readable/writable as attributes.

    The instance ``__dict__`` is aliased to the mapping itself, so
    ``d.key`` and ``d["key"]`` share one storage.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        self.__dict__ = self
class BackboneTrial(PyTorchTrial):
    """Determined ``PyTorchTrial`` that trains and evaluates a backbone net.

    The architecture is selected by ``hparams.backbone`` (a "depth,width"
    string) and the dataset by ``hparams.task``.  The audio task is treated
    as multi-label (BCE loss + sigmoid scoring); every other task uses
    cross-entropy.
    """
    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        # AttrDict gives attribute-style access: hparams.task, hparams.backbone, ...
        self.hparams = AttrDict(trial_context.get_hparams())
        self.last_epoch = 0
        # Side effect: also populates self.train_data / self.val_data
        # (and self.test_data when hparams.train is set).
        self.download_directory = self.download_data_from_s3()
        # task name -> (number of classes, input channels)
        dataset_hypers = {'sEMG': (7, 1), 'ninapro': (18, 1), 'cifar10': (10, 3),
                          'smnist': (10, 1), 'cifar100':(100, 3), 'scifar100': (100, 3),
                          'audio': (200, 1)}
        n_classes, in_channels = dataset_hypers[self.hparams.task]
        print('task: ', self.hparams.task, 'in_channels: ', in_channels, 'classes: ', n_classes)
        # "backbone" hyperparameter is a comma-separated "depth,width" string.
        depth = list(map(int, self.hparams.backbone.split(',')))[0]
        width = list(map(int, self.hparams.backbone.split(',')))[1]
        if self.hparams.task == 'audio':
            # Multi-label audio tagging -> per-class sigmoid + BCE.
            self.criterion = nn.BCEWithLogitsLoss().cuda()
            self.backbone = Backbone_Audio(depth, n_classes, width,
                                           dropRate=self.hparams.droprate, in_channels=in_channels)
        else:
            self.criterion = nn.CrossEntropyLoss().cuda()
            self.backbone = Backbone_Pt(
                depth,
                n_classes,
                width,
                dropRate=self.hparams.droprate,
                in_channels=in_channels,
            )
        # Trainable-parameter count in millions, for logging only.
        total_params = sum(p.numel() for p in self.backbone.parameters() if p.requires_grad)/ 1e6
        print('Parameter size in MB(backbone): ', total_params)
        self.model = self.context.wrap_model(self.backbone)
        # Counts evaluation rounds; the audio test set is only re-scored on
        # every 20th evaluation (see evaluate_full_dataset).
        self.last_eval = 0
        # Nesterov momentum is only valid when momentum > 0.
        nesterov = self.hparams.nesterov if self.hparams.momentum else False
        self.opt = self.context.wrap_optimizer(torch.optim.SGD(
            self.model.parameters(),
            lr=self.hparams.learning_rate,
            momentum=self.hparams.momentum,
            weight_decay=self.hparams.weight_decay,
            nesterov=nesterov)
        )
        # Step-decay schedule applied once per epoch (see weight_sched).
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=torch.optim.lr_scheduler.LambdaLR(
                self.opt,
                lr_lambda=self.weight_sched,
                last_epoch=self.hparams.start_epoch - 1
            ),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )
    def weight_sched(self, epoch) -> Any:
        """LR multiplier: decay x0.2 at ~30%/60%/80% of training, or at
        epochs 60/120/160 for the canonical 200-epoch schedule.

        NOTE(review): the generic branch mixes ``>=`` and ``>`` at its
        three milestones while the 200-epoch branch uses ``>=`` throughout
        — possibly unintentional; confirm before relying on exact epochs.
        """
        if self.hparams.epochs != 200:
            return 0.2 ** (epoch >= int(0.3 * self.hparams.epochs)) * 0.2 ** (epoch > int(0.6 * self.hparams.epochs)) * 0.2 ** (epoch > int(0.8 * self.hparams.epochs))
        return 0.2 ** (epoch >= 60) * 0.2 ** (epoch >= 120) * 0.2 ** (epoch >=160)
    def download_data_from_s3(self):
        """Download the task's dataset from S3 into the CWD and load splits.

        Populates ``self.train_data``/``self.val_data`` (plus
        ``self.test_data`` when ``hparams.train`` is set) and returns the
        download directory.
        """
        s3_bucket = self.context.get_data_config()["bucket"]
        download_directory = os.getcwd()
        s3 = boto3.client("s3")  # NOTE(review): unused in this function — confirm whether download_from_s3 needs it
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        if self.hparams.train:
            self.train_data, self.val_data, self.test_data = load_data(self.hparams.task, download_directory, True, self.hparams.permute)
            self.build_test_data_loader(download_directory)
        else:
            self.train_data, _, self.val_data = load_data(self.hparams.task, download_directory, False, self.hparams.permute)
        return download_directory
    def build_training_data_loader(self) -> DataLoader:
        """Return the shuffled training DataLoader (drops the last partial batch)."""
        trainset = self.train_data
        print(len(trainset))
        train_loader = DataLoader(trainset, num_workers=4, batch_size=self.context.get_per_slot_batch_size(),
                                  shuffle=True, sampler=None, collate_fn=_collate_fn,
                                  pin_memory=False, drop_last=True)
        print(len(train_loader))
        return train_loader
    def build_validation_data_loader(self) -> DataLoader:
        """Return the validation DataLoader (batch_size=1, eval collate)."""
        valset = self.val_data
        print(len(valset))
        return DataLoader(valset, sampler=None, num_workers=4,
                          collate_fn=_collate_fn_eval,
                          shuffle=False, batch_size=1,
                          pin_memory=False
                          )
    def build_test_data_loader(self, download_directory):
        """Log the test-set size; the test set itself is consumed directly
        by evaluate_audio_testset, so no loader is built."""
        testset = self.test_data
        print(len(testset))
        return
    def train_batch(self, batch: TorchData, epoch_idx: int, batch_idx: int
                    ) -> Dict[str, torch.Tensor]:
        """One optimization step; returns the batch loss.

        The collate function yields a 3-tuple whose middle element is
        unused here (presumably lengths/masks — TODO confirm).
        """
        x_train, _, y_train = batch
        self.model.train()
        output = self.model(x_train)
        loss = self.criterion(output, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(self.opt)
        return {
            'loss': loss,
        }
    def evaluate_full_dataset(
        self, data_loader: torch.utils.data.DataLoader,
    ) -> Dict[str, Any]:
        """Evaluate on the validation loader; returns loss and macro mAP.

        In audio evaluation-only mode it delegates to
        evaluate_audio_testset; otherwise it scores the audio test set as
        well, but only on every 20th call (tracked via self.last_eval).
        """
        if not self.hparams.train and self.hparams.task == 'audio':
            return self.evaluate_audio_testset(self.val_data)
        loss_avg = utils_pt.AverageMeter()
        val_predictions = []
        val_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch  # NOTE: shadows the builtin `input`
                n = input.size(0)
                logits = self.model(input)
                # Average per-crop logits into a single clip-level prediction.
                logits = logits.mean(0).unsqueeze(0)
                loss = self.criterion(logits, target)
                loss_avg.update(loss, n)
                logits_sigmoid = torch.sigmoid(logits)
                val_predictions.append(logits_sigmoid.detach().cpu().numpy()[0])
                val_gts.append(target.detach().cpu().numpy()[0])
        val_preds = np.asarray(val_predictions).astype('float32')
        val_gts = np.asarray(val_gts).astype('int32')
        map_value = average_precision_score(val_gts, val_preds, average="macro")
        results = {
            "loss": loss_avg.avg,
            "val_mAP": map_value,
        }
        if self.hparams.task == 'audio' and self.last_eval % 20 == 0:
            results.update(self.evaluate_audio_testset(self.test_data))
        self.last_eval += 1
        return results
    def evaluate_audio_testset(self, testset) -> Dict[str, torch.Tensor]:
        """Score the audio test set; returns {"test_mAUC", "test_mAP"}.

        Iterates item-by-item, averages per-crop logits, applies a sigmoid
        (multi-label targets) and macro-averages AP/AUC from
        ``calculate_stats``.
        """
        cnt = 0  # NOTE(review): never used below
        test_predictions = []
        test_gts = []
        for ix in range(testset.len):
            with torch.no_grad():
                batch = testset[ix]
                x, y = batch
                x = x.cuda()
                y_pred = self.model(x)
                y_pred = y_pred.mean(0).unsqueeze(0)
                sigmoid_preds = torch.sigmoid(y_pred)
                test_predictions.append(sigmoid_preds.detach().cpu().numpy()[0])
                test_gts.append(y.detach().cpu().numpy()[0])  # drop batch axis
        test_predictions = np.asarray(test_predictions).astype('float32')
        test_gts = np.asarray(test_gts).astype('int32')
        stats = calculate_stats(test_predictions, test_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {
            "test_mAUC": mAUC,
            "test_mAP": mAP,
        }
        return results
| true | true |
f73148848e36430282de1201af2441bcc49e6a9f | 43,486 | py | Python | utils/jsonrpc.py | wanghm92/graph-2-text | 050e56b059cb25e1223e54aeec180e8e8fba9637 | [
"MIT"
] | 149 | 2018-10-24T21:39:44.000Z | 2022-01-27T15:47:48.000Z | utils/jsonrpc.py | wanghm92/graph-2-text | 050e56b059cb25e1223e54aeec180e8e8fba9637 | [
"MIT"
] | 12 | 2018-12-09T01:06:52.000Z | 2021-02-19T10:48:40.000Z | utils/jsonrpc.py | wanghm92/graph-2-text | 050e56b059cb25e1223e54aeec180e8e8fba9637 | [
"MIT"
] | 32 | 2018-10-25T00:52:12.000Z | 2021-12-07T15:24:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
JSON-RPC (remote procedure call).
It consists of 3 (independent) parts:
- proxy/dispatcher
- data structure / serializer
- transport
It's intended for JSON-RPC, but since the above 3 parts are independent,
it could be used for other RPCs as well.
Currently, JSON-RPC 2.0(pre) and JSON-RPC 1.0 are implemented
:Version: 2008-08-31-beta
:Status: experimental
:Example:
simple Client with JsonRPC2.0 and TCP/IP::
>>> proxy = ServerProxy( JsonRpc20(), TransportTcpIp(addr=("127.0.0.1",31415)) )
>>> proxy.echo( "hello world" )
u'hello world'
>>> proxy.echo( "bye." )
u'bye.'
simple Server with JsonRPC2.0 and TCP/IP with logging to STDOUT::
>>> server = Server( JsonRpc20(), TransportTcpIp(addr=("127.0.0.1",31415), logfunc=log_stdout) )
>>> def echo( s ):
... return s
>>> server.register_function( echo )
>>> server.serve( 2 ) # serve 2 requests # doctest: +ELLIPSIS
listen ('127.0.0.1', 31415)
('127.0.0.1', ...) connected
('127.0.0.1', ...) <-- {"jsonrpc": "2.0", "method": "echo", "params": ["hello world"], "id": 0}
('127.0.0.1', ...) --> {"jsonrpc": "2.0", "result": "hello world", "id": 0}
('127.0.0.1', ...) close
('127.0.0.1', ...) connected
('127.0.0.1', ...) <-- {"jsonrpc": "2.0", "method": "echo", "params": ["bye."], "id": 0}
('127.0.0.1', ...) --> {"jsonrpc": "2.0", "result": "bye.", "id": 0}
('127.0.0.1', ...) close
close ('127.0.0.1', 31415)
Client with JsonRPC2.0 and an abstract Unix Domain Socket::
>>> proxy = ServerProxy( JsonRpc20(), TransportUnixSocket(addr="\\x00.rpcsocket") )
>>> proxy.hi( message="hello" ) #named parameters
u'hi there'
>>> proxy.test() #fault
Traceback (most recent call last):
...
jsonrpc.RPCMethodNotFound: <RPCFault -32601: u'Method not found.' (None)>
>>> proxy.debug.echo( "hello world" ) #hierarchical procedures
u'hello world'
Server with JsonRPC2.0 and abstract Unix Domain Socket with a logfile::
>>> server = Server( JsonRpc20(), TransportUnixSocket(addr="\\x00.rpcsocket", logfunc=log_file("mylog.txt")) )
>>> def echo( s ):
... return s
>>> def hi( message ):
... return "hi there"
>>> server.register_function( hi )
>>> server.register_function( echo, name="debug.echo" )
>>> server.serve( 3 ) # serve 3 requests
"mylog.txt" then contains:
listen '\\x00.rpcsocket'
'' connected
'' --> '{"jsonrpc": "2.0", "method": "hi", "params": {"message": "hello"}, "id": 0}'
'' <-- '{"jsonrpc": "2.0", "result": "hi there", "id": 0}'
'' close
'' connected
'' --> '{"jsonrpc": "2.0", "method": "test", "id": 0}'
'' <-- '{"jsonrpc": "2.0", "error": {"code":-32601, "message": "Method not found."}, "id": 0}'
'' close
'' connected
'' --> '{"jsonrpc": "2.0", "method": "debug.echo", "params": ["hello world"], "id": 0}'
'' <-- '{"jsonrpc": "2.0", "result": "hello world", "id": 0}'
'' close
close '\\x00.rpcsocket'
:Note: all exceptions derived from RPCFault are propagated to the client.
other exceptions are logged and result in a sent-back "empty" INTERNAL_ERROR.
:Uses: simplejson, socket, sys,time,codecs
:SeeAlso: JSON-RPC 2.0 proposal, 1.0 specification
:Warning:
.. Warning::
This is **experimental** code!
:Bug:
:Author: Roland Koebler (rk(at)simple-is-better.org)
:Copyright: 2007-2008 by Roland Koebler (rk(at)simple-is-better.org)
:License: see __license__
:Changelog:
- 2008-08-31: 1st release
TODO:
- server: multithreading rpc-server
- client: multicall (send several requests)
- transport: SSL sockets, maybe HTTP, HTTPS
- types: support for date/time (ISO 8601)
- errors: maybe customizable error-codes/exceptions
- mixed 1.0/2.0 server ?
- system description etc. ?
- maybe test other json-serializers, like cjson?
"""
__version__ = "2008-08-31-beta"
__author__ = "Roland Koebler <rk(at)simple-is-better.org>"
__license__ = """Copyright (c) 2007-2008 by Roland Koebler (rk(at)simple-is-better.org)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
#=========================================
#import
import sys
try:
import json
except ImportError:
import simplejson as json
#=========================================
# errors
#----------------------
# error-codes + exceptions
#JSON-RPC 2.0 error-codes
# Codes in -32768..-32600 are reserved by the JSON-RPC 2.0 proposal.
PARSE_ERROR = -32700
INVALID_REQUEST = -32600
METHOD_NOT_FOUND = -32601
INVALID_METHOD_PARAMS = -32602 #invalid number/type of parameters
INTERNAL_ERROR = -32603 #"all other errors"
#additional, implementation-defined error-codes (server-error range)
PROCEDURE_EXCEPTION = -32000
AUTHENTIFICATION_ERROR = -32001
PERMISSION_DENIED = -32002
INVALID_PARAM_VALUES = -32003
#default human-readable message for each error-code above
ERROR_MESSAGE = {
    PARSE_ERROR : "Parse error.",
    INVALID_REQUEST : "Invalid Request.",
    METHOD_NOT_FOUND : "Method not found.",
    INVALID_METHOD_PARAMS : "Invalid parameters.",
    INTERNAL_ERROR : "Internal error.",
    PROCEDURE_EXCEPTION : "Procedure exception.",
    AUTHENTIFICATION_ERROR : "Authentification error.",
    PERMISSION_DENIED : "Permission denied.",
    INVALID_PARAM_VALUES: "Invalid parameter values."
    }
#----------------------
# exceptions
class RPCError(Exception):
    """Base class for rpc-errors (root of this module's exception hierarchy)."""
class RPCTransportError(RPCError):
    """Transport error: the message could not be sent/received at all."""
class RPCTimeoutError(RPCTransportError):
    """Transport/reply timeout."""
class RPCFault(RPCError):
    """RPC error/fault package received.

    Also usable as a constructor for outgoing RPC-error/fault messages.

    :Variables:
        - error_code:    the RPC error-code
        - error_message: description of the error
        - error_data:    optional additional information
                         (must be json-serializable)
    :TODO: improve __str__
    """
    def __init__(self, error_code, error_message, error_data=None):
        RPCError.__init__(self)
        self.error_code = error_code
        self.error_message = error_message
        self.error_data = error_data
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        return "<RPCFault {0}: {1!r} ({2!r})>".format(
            self.error_code, self.error_message, self.error_data)
class RPCParseError(RPCFault):
    """Broken rpc-package: the payload was not valid JSON. (PARSE_ERROR, -32700)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, PARSE_ERROR, ERROR_MESSAGE[PARSE_ERROR], error_data)
class RPCInvalidRPC(RPCFault):
    """Invalid rpc-package: valid JSON but not a valid RPC message. (INVALID_REQUEST, -32600)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, INVALID_REQUEST, ERROR_MESSAGE[INVALID_REQUEST], error_data)
class RPCMethodNotFound(RPCFault):
    """Method not found on the server. (METHOD_NOT_FOUND, -32601)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, METHOD_NOT_FOUND, ERROR_MESSAGE[METHOD_NOT_FOUND], error_data)
class RPCInvalidMethodParams(RPCFault):
    """Invalid method-parameters (wrong number/type). (INVALID_METHOD_PARAMS, -32602)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, INVALID_METHOD_PARAMS, ERROR_MESSAGE[INVALID_METHOD_PARAMS], error_data)
class RPCInternalError(RPCFault):
    """Internal server error ("all other errors"). (INTERNAL_ERROR, -32603)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR], error_data)
class RPCProcedureException(RPCFault):
    """The invoked procedure raised an exception. (PROCEDURE_EXCEPTION, -32000)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, PROCEDURE_EXCEPTION, ERROR_MESSAGE[PROCEDURE_EXCEPTION], error_data)
class RPCAuthentificationError(RPCFault):
    """Authentication failed. (AUTHENTIFICATION_ERROR, -32001)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, AUTHENTIFICATION_ERROR, ERROR_MESSAGE[AUTHENTIFICATION_ERROR], error_data)
class RPCPermissionDenied(RPCFault):
    """Caller is not allowed to invoke this method. (PERMISSION_DENIED, -32002)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, PERMISSION_DENIED, ERROR_MESSAGE[PERMISSION_DENIED], error_data)
class RPCInvalidParamValues(RPCFault):
    """Parameter values are out of range/invalid. (INVALID_PARAM_VALUES, -32003)"""
    def __init__(self, error_data=None):
        RPCFault.__init__(self, INVALID_PARAM_VALUES, ERROR_MESSAGE[INVALID_PARAM_VALUES], error_data)
#=========================================
# data structure / serializer
#----------------------
#
def dictkeyclean(d):
    """Convert all keys of the dict 'd' to (ascii-)strings.

    Returns a new dict; values are carried over unchanged.

    :Raises: UnicodeEncodeError
    """
    # Iterate keys directly instead of dict.iteritems(): behaviour is
    # identical on Python 2, and it also works on Python 3, where
    # iteritems() was removed.
    new_d = {}
    for k in d:
        new_d[str(k)] = d[k]
    return new_d
#----------------------
# JSON-RPC 1.0
class JsonRpc10:
    """JSON-RPC V1.0 data-structure / serializer

    This implementation is quite liberal in what it accepts: It treats
    missing "params" and "id" in Requests and missing "result"/"error" in
    Responses as empty/null.

    Fix vs. original: ``except ValueError, err`` (Python-2-only syntax,
    a SyntaxError on Python 3) replaced by ``except ValueError as err``,
    which is valid on Python 2.6+ as well — no behavior change.

    :SeeAlso:   JSON-RPC 1.0 specification
    :TODO:      catch simplejson.dumps not-serializable-exceptions
    """
    def __init__(self, dumps=json.dumps, loads=json.loads):
        """init: set serializer to use

        :Parameters:
            - dumps: json-encoder-function
            - loads: json-decoder-function
        :Note: The dumps_* functions of this class already directly create
               the invariant parts of the resulting json-object themselves,
               without using the given json-encoder-function.
        """
        self.dumps = dumps
        self.loads = loads
    def dumps_request( self, method, params=(), id=0 ):
        """serialize JSON-RPC-Request

        :Parameters:
            - method: the method-name (str/unicode)
            - params: the parameters (list/tuple)
            - id:     if id=None, this results in a Notification
        :Returns:   | {"method": "...", "params": ..., "id": ...}
                    | "method", "params" and "id" are always in this order.
        :Raises:    TypeError if method/params is of wrong type or
                    not JSON-serializable
        """
        # "unicode" is the Python 2 text type (module targets Python 2).
        if not isinstance(method, (str, unicode)):
            raise TypeError('"method" must be a string (or unicode string).')
        if not isinstance(params, (tuple, list)):
            raise TypeError("params must be a tuple/list.")
        return '{"method": %s, "params": %s, "id": %s}' % \
                (self.dumps(method), self.dumps(params), self.dumps(id))
    def dumps_notification( self, method, params=() ):
        """serialize a JSON-RPC-Notification

        :Parameters: see dumps_request
        :Returns:   | {"method": "...", "params": ..., "id": null}
                    | "method", "params" and "id" are always in this order.
        :Raises:    see dumps_request
        """
        if not isinstance(method, (str, unicode)):
            raise TypeError('"method" must be a string (or unicode string).')
        if not isinstance(params, (tuple, list)):
            raise TypeError("params must be a tuple/list.")
        return '{"method": %s, "params": %s, "id": null}' % \
                (self.dumps(method), self.dumps(params))
    def dumps_response( self, result, id=None ):
        """serialize a JSON-RPC-Response (without error)

        :Returns:   | {"result": ..., "error": null, "id": ...}
                    | "result", "error" and "id" are always in this order.
        :Raises:    TypeError if not JSON-serializable
        """
        return '{"result": %s, "error": null, "id": %s}' % \
                (self.dumps(result), self.dumps(id))
    def dumps_error( self, error, id=None ):
        """serialize a JSON-RPC-Response-error

        Since JSON-RPC 1.0 does not define an error-object, this uses the
        JSON-RPC 2.0 error-object.

        :Parameters:
            - error: a RPCFault instance
        :Returns:   | {"result": null, "error": {"code": error_code, "message": error_message, "data": error_data}, "id": ...}
                    | "result", "error" and "id" are always in this order, data is omitted if None.
        :Raises:    ValueError if error is not a RPCFault instance,
                    TypeError if not JSON-serializable
        """
        if not isinstance(error, RPCFault):
            raise ValueError("""error must be a RPCFault-instance.""")
        if error.error_data is None:
            return '{"result": null, "error": {"code":%s, "message": %s}, "id": %s}' % \
                    (self.dumps(error.error_code), self.dumps(error.error_message), self.dumps(id))
        else:
            return '{"result": null, "error": {"code":%s, "message": %s, "data": %s}, "id": %s}' % \
                    (self.dumps(error.error_code), self.dumps(error.error_message), self.dumps(error.error_data), self.dumps(id))
    def loads_request( self, string ):
        """de-serialize a JSON-RPC Request/Notification

        :Returns:   | [method_name, params, id] or [method_name, params]
                    | params is a tuple/list
                    | if id is missing, this is a Notification
        :Raises:    RPCParseError, RPCInvalidRPC, RPCInvalidMethodParams
        """
        try:
            data = self.loads(string)
        except ValueError as err:
            raise RPCParseError("No valid JSON. (%s)" % str(err))
        if not isinstance(data, dict): raise RPCInvalidRPC("No valid RPC-package.")
        if "method" not in data: raise RPCInvalidRPC("""Invalid Request, "method" is missing.""")
        if not isinstance(data["method"], (str, unicode)):
            raise RPCInvalidRPC("""Invalid Request, "method" must be a string.""")
        if "id" not in data: data["id"] = None #be liberal
        if "params" not in data: data["params"] = () #be liberal
        if not isinstance(data["params"], (list, tuple)):
            raise RPCInvalidRPC("""Invalid Request, "params" must be an array.""")
        # After filling the defaults, only method/params/id may remain.
        if len(data) != 3: raise RPCInvalidRPC("""Invalid Request, additional fields found.""")
        # notification / request
        if data["id"] is None:
            return data["method"], data["params"] #notification
        else:
            return data["method"], data["params"], data["id"] #request
    def loads_response( self, string ):
        """de-serialize a JSON-RPC Response/error

        :Returns: | [result, id] for Responses
        :Raises:  | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC
                  | Note that for error-packages which do not match the
                    V2.0-definition, RPCFault(-1, "Error", RECEIVED_ERROR_OBJ)
                    is raised.
        """
        try:
            data = self.loads(string)
        except ValueError as err:
            raise RPCParseError("No valid JSON. (%s)" % str(err))
        if not isinstance(data, dict): raise RPCInvalidRPC("No valid RPC-package.")
        if "id" not in data: raise RPCInvalidRPC("""Invalid Response, "id" missing.""")
        if "result" not in data: data["result"] = None #be liberal
        if "error" not in data: data["error"] = None #be liberal
        if len(data) != 3: raise RPCInvalidRPC("""Invalid Response, additional or missing fields.""")
        #error
        if data["error"] is not None:
            if data["result"] is not None:
                raise RPCInvalidRPC("""Invalid Response, one of "result" or "error" must be null.""")
            #v2.0 error-format: map the received code onto the matching exception
            if( isinstance(data["error"], dict) and "code" in data["error"] and "message" in data["error"] and
                (len(data["error"])==2 or ("data" in data["error"] and len(data["error"])==3)) ):
                if "data" not in data["error"]:
                    error_data = None
                else:
                    error_data = data["error"]["data"]
                if data["error"]["code"] == PARSE_ERROR:
                    raise RPCParseError(error_data)
                elif data["error"]["code"] == INVALID_REQUEST:
                    raise RPCInvalidRPC(error_data)
                elif data["error"]["code"] == METHOD_NOT_FOUND:
                    raise RPCMethodNotFound(error_data)
                elif data["error"]["code"] == INVALID_METHOD_PARAMS:
                    raise RPCInvalidMethodParams(error_data)
                elif data["error"]["code"] == INTERNAL_ERROR:
                    raise RPCInternalError(error_data)
                elif data["error"]["code"] == PROCEDURE_EXCEPTION:
                    raise RPCProcedureException(error_data)
                elif data["error"]["code"] == AUTHENTIFICATION_ERROR:
                    raise RPCAuthentificationError(error_data)
                elif data["error"]["code"] == PERMISSION_DENIED:
                    raise RPCPermissionDenied(error_data)
                elif data["error"]["code"] == INVALID_PARAM_VALUES:
                    raise RPCInvalidParamValues(error_data)
                else:
                    raise RPCFault(data["error"]["code"], data["error"]["message"], error_data)
            #other error-format
            else:
                raise RPCFault(-1, "Error", data["error"])
        #result
        else:
            return data["result"], data["id"]
#----------------------
# JSON-RPC 2.0
class JsonRpc20:
    """JSON-RPC V2.0 data-structure / serializer

    Fix vs. original: ``except ValueError, err`` (Python-2-only syntax,
    a SyntaxError on Python 3) replaced by ``except ValueError as err``,
    which is valid on Python 2.6+ as well — no behavior change.

    :SeeAlso:   JSON-RPC 2.0 specification
    :TODO:      catch simplejson.dumps not-serializable-exceptions
    """
    def __init__(self, dumps=json.dumps, loads=json.loads):
        """init: set serializer to use

        :Parameters:
            - dumps: json-encoder-function
            - loads: json-decoder-function
        :Note: The dumps_* functions of this class already directly create
               the invariant parts of the resulting json-object themselves,
               without using the given json-encoder-function.
        """
        self.dumps = dumps
        self.loads = loads
    def dumps_request( self, method, params=(), id=0 ):
        """serialize JSON-RPC-Request

        :Parameters:
            - method: the method-name (str/unicode)
            - params: the parameters (list/tuple/dict)
            - id:     the id (should not be None)
        :Returns:   | {"jsonrpc": "2.0", "method": "...", "params": ..., "id": ...}
                    | "jsonrpc", "method", "params" and "id" are always in this order.
                    | "params" is omitted if empty
        :Raises:    TypeError if method/params is of wrong type or
                    not JSON-serializable
        """
        # "unicode" is the Python 2 text type (module targets Python 2).
        if not isinstance(method, (str, unicode)):
            raise TypeError('"method" must be a string (or unicode string).')
        if not isinstance(params, (tuple, list, dict)):
            raise TypeError("params must be a tuple/list/dict or None.")
        if params:
            return '{"jsonrpc": "2.0", "method": %s, "params": %s, "id": %s}' % \
                    (self.dumps(method), self.dumps(params), self.dumps(id))
        else:
            return '{"jsonrpc": "2.0", "method": %s, "id": %s}' % \
                    (self.dumps(method), self.dumps(id))
    def dumps_notification( self, method, params=() ):
        """serialize a JSON-RPC-Notification

        :Parameters: see dumps_request
        :Returns:   | {"jsonrpc": "2.0", "method": "...", "params": ...}
                    | "jsonrpc", "method" and "params" are always in this order.
        :Raises:    see dumps_request
        """
        if not isinstance(method, (str, unicode)):
            raise TypeError('"method" must be a string (or unicode string).')
        if not isinstance(params, (tuple, list, dict)):
            raise TypeError("params must be a tuple/list/dict or None.")
        if params:
            return '{"jsonrpc": "2.0", "method": %s, "params": %s}' % \
                    (self.dumps(method), self.dumps(params))
        else:
            return '{"jsonrpc": "2.0", "method": %s}' % \
                    (self.dumps(method))
    def dumps_response( self, result, id=None ):
        """serialize a JSON-RPC-Response (without error)

        :Returns:   | {"jsonrpc": "2.0", "result": ..., "id": ...}
                    | "jsonrpc", "result", and "id" are always in this order.
        :Raises:    TypeError if not JSON-serializable
        """
        return '{"jsonrpc": "2.0", "result": %s, "id": %s}' % \
                (self.dumps(result), self.dumps(id))
    def dumps_error( self, error, id=None ):
        """serialize a JSON-RPC-Response-error

        :Parameters:
            - error: a RPCFault instance
        :Returns:   | {"jsonrpc": "2.0", "error": {"code": error_code, "message": error_message, "data": error_data}, "id": ...}
                    | "jsonrpc", "result", "error" and "id" are always in this order, data is omitted if None.
        :Raises:    ValueError if error is not a RPCFault instance,
                    TypeError if not JSON-serializable
        """
        if not isinstance(error, RPCFault):
            raise ValueError("""error must be a RPCFault-instance.""")
        if error.error_data is None:
            return '{"jsonrpc": "2.0", "error": {"code":%s, "message": %s}, "id": %s}' % \
                    (self.dumps(error.error_code), self.dumps(error.error_message), self.dumps(id))
        else:
            return '{"jsonrpc": "2.0", "error": {"code":%s, "message": %s, "data": %s}, "id": %s}' % \
                    (self.dumps(error.error_code), self.dumps(error.error_message), self.dumps(error.error_data), self.dumps(id))
    def loads_request( self, string ):
        """de-serialize a JSON-RPC Request/Notification

        :Returns:   | [method_name, params, id] or [method_name, params]
                    | params is a tuple/list or dict (with only str-keys)
                    | if id is missing, this is a Notification
        :Raises:    RPCParseError, RPCInvalidRPC, RPCInvalidMethodParams
        """
        try:
            data = self.loads(string)
        except ValueError as err:
            raise RPCParseError("No valid JSON. (%s)" % str(err))
        if not isinstance(data, dict): raise RPCInvalidRPC("No valid RPC-package.")
        if "jsonrpc" not in data: raise RPCInvalidRPC("""Invalid Response, "jsonrpc" missing.""")
        if not isinstance(data["jsonrpc"], (str, unicode)):
            raise RPCInvalidRPC("""Invalid Response, "jsonrpc" must be a string.""")
        if data["jsonrpc"] != "2.0": raise RPCInvalidRPC("""Invalid jsonrpc version.""")
        if "method" not in data: raise RPCInvalidRPC("""Invalid Request, "method" is missing.""")
        if not isinstance(data["method"], (str, unicode)):
            raise RPCInvalidRPC("""Invalid Request, "method" must be a string.""")
        if "params" not in data: data["params"] = ()
        #convert params-keys from unicode to str
        elif isinstance(data["params"], dict):
            try:
                data["params"] = dictkeyclean(data["params"])
            except UnicodeEncodeError:
                raise RPCInvalidMethodParams("Parameter-names must be in ascii.")
        elif not isinstance(data["params"], (list, tuple)):
            raise RPCInvalidRPC("""Invalid Request, "params" must be an array or object.""")
        # jsonrpc+method+params (+id) are the only fields allowed.
        if not( len(data)==3 or ("id" in data and len(data)==4) ):
            raise RPCInvalidRPC("""Invalid Request, additional fields found.""")
        # notification / request
        if "id" not in data:
            return data["method"], data["params"] #notification
        else:
            return data["method"], data["params"], data["id"] #request
    def loads_response( self, string ):
        """de-serialize a JSON-RPC Response/error

        :Returns: | [result, id] for Responses
        :Raises:  | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC
        """
        try:
            data = self.loads(string)
        except ValueError as err:
            raise RPCParseError("No valid JSON. (%s)" % str(err))
        if not isinstance(data, dict): raise RPCInvalidRPC("No valid RPC-package.")
        if "jsonrpc" not in data: raise RPCInvalidRPC("""Invalid Response, "jsonrpc" missing.""")
        if not isinstance(data["jsonrpc"], (str, unicode)):
            raise RPCInvalidRPC("""Invalid Response, "jsonrpc" must be a string.""")
        if data["jsonrpc"] != "2.0": raise RPCInvalidRPC("""Invalid jsonrpc version.""")
        if "id" not in data: raise RPCInvalidRPC("""Invalid Response, "id" missing.""")
        if "result" not in data: data["result"] = None
        if "error" not in data: data["error"] = None
        if len(data) != 4: raise RPCInvalidRPC("""Invalid Response, additional or missing fields.""")
        #error
        if data["error"] is not None:
            if data["result"] is not None:
                raise RPCInvalidRPC("""Invalid Response, only "result" OR "error" allowed.""")
            if not isinstance(data["error"], dict): raise RPCInvalidRPC("Invalid Response, invalid error-object.")
            if "code" not in data["error"] or "message" not in data["error"]:
                raise RPCInvalidRPC("Invalid Response, invalid error-object.")
            if "data" not in data["error"]: data["error"]["data"] = None
            if len(data["error"]) != 3:
                raise RPCInvalidRPC("Invalid Response, invalid error-object.")
            # Map the received code onto the matching exception class.
            error_data = data["error"]["data"]
            if data["error"]["code"] == PARSE_ERROR:
                raise RPCParseError(error_data)
            elif data["error"]["code"] == INVALID_REQUEST:
                raise RPCInvalidRPC(error_data)
            elif data["error"]["code"] == METHOD_NOT_FOUND:
                raise RPCMethodNotFound(error_data)
            elif data["error"]["code"] == INVALID_METHOD_PARAMS:
                raise RPCInvalidMethodParams(error_data)
            elif data["error"]["code"] == INTERNAL_ERROR:
                raise RPCInternalError(error_data)
            elif data["error"]["code"] == PROCEDURE_EXCEPTION:
                raise RPCProcedureException(error_data)
            elif data["error"]["code"] == AUTHENTIFICATION_ERROR:
                raise RPCAuthentificationError(error_data)
            elif data["error"]["code"] == PERMISSION_DENIED:
                raise RPCPermissionDenied(error_data)
            elif data["error"]["code"] == INVALID_PARAM_VALUES:
                raise RPCInvalidParamValues(error_data)
            else:
                raise RPCFault(data["error"]["code"], data["error"]["message"], error_data)
        #result
        else:
            return data["result"], data["id"]
#=========================================
# transports
#----------------------
# transport-logging
import codecs
import time
def log_dummy( message ):
    """No-op logger: silently discard the message."""
    return None
def log_stdout( message ):
    """print message to STDOUT"""
    # Python 2 print statement: writes the raw message plus a newline.
    print message
def log_file( filename ):
    """Return a logfunc which appends each message (+newline) to *filename* in utf-8."""
    def logfile( message ):
        # Open in append mode per call so concurrent writers interleave whole lines.
        with codecs.open( filename, 'a', encoding='utf-8' ) as out:
            out.write( message+"\n" )
    return logfile
def log_filedate( filename ):
    """Return a logfunc which appends '"YYYY-MM-DD HH:MM:SS " + message' lines to *filename* in utf-8."""
    def logfile( message ):
        stamp = time.strftime("%Y-%m-%d %H:%M:%S ")
        with codecs.open( filename, 'a', encoding='utf-8' ) as out:
            out.write( stamp+message+"\n" )
    return logfile
#----------------------
class Transport:
    """Generic Transport-interface.

    This class, and especially its methods and docstrings,
    define the Transport-Interface.
    """
    def __init__(self):
        pass
    def send(self, data):
        """Send all data. Must be implemented by derived classes."""
        raise NotImplementedError
    def recv(self):
        """Receive data. Must be implemented by derived classes."""
        raise NotImplementedError
    def sendrecv(self, string):
        """Send *string*, then return the received reply."""
        self.send(string)
        return self.recv()
    def serve(self, handler, n=None):
        """Serve forever, or for *n* communications.

        Loop: receive data, call ``result = handler(data)``, and send the
        result back unless it is None.  Serving can be stopped by SIGINT.

        :TODO:
            - how to stop?
              maybe use a .run-file, and stop server if file removed?
            - maybe make the served-count accessible? (e.g. for logging)
        """
        served = 0
        while n is None or served < n:
            request = self.recv()
            reply = handler(request)
            if reply is not None:
                self.send(reply)
            served += 1
class TransportSTDINOUT(Transport):
    """receive from STDIN, send to STDOUT.

    Useful e.g. for debugging.
    """
    def send(self, string):
        """write data to STDOUT with '***SEND:' prefix """
        # Python 2 print statements: marker line, then the payload.
        print "***SEND:"
        print string
    def recv(self):
        """read data from STDIN"""
        # Blocks until EOF (^D): reads the whole remaining stream.
        print "***RECV (please enter, ^D ends.):"
        return sys.stdin.read()
import socket, select
class TransportSocket(Transport):
    """Transport via socket.

    :SeeAlso: python-module socket
    :TODO:
        - improve this (e.g. make sure that connections are closed, socket-files are deleted etc.)
        - exception-handling? (socket.error)
    """
    def __init__( self, addr, limit=44096, sock_type=socket.AF_INET, sock_prot=socket.SOCK_STREAM, timeout=500.0, logfunc=log_dummy ):
        """
        :Parameters:
            - addr: socket-address
            - limit: max. number of bytes per recv()-call
            - sock_type/sock_prot: socket family / protocol
            - timeout: timeout in seconds
              (NOTE(review): limit/timeout deviate from the upstream defaults
              4096/5.0 — kept as-is since callers may depend on them)
            - logfunc: function for logging, logfunc(message)
        :Raises: socket.timeout after timeout
        """
        self.limit = limit
        self.addr = addr
        self.s_type = sock_type
        self.s_prot = sock_prot
        self.s = None               #the socket; None while not connected
        self.timeout = timeout
        self.log = logfunc
    def connect( self ):
        """open a (fresh) connection to self.addr"""
        self.close()
        self.log( "connect to %s" % repr(self.addr) )
        self.s = socket.socket( self.s_type, self.s_prot )
        self.s.settimeout( self.timeout )
        self.s.connect( self.addr )
    def close( self ):
        """close the connection (safe to call repeatedly)"""
        if self.s is not None:
            self.log( "close %s" % repr(self.addr) )
            self.s.close()
            self.s = None
    def __repr__(self):
        return "<TransportSocket, %s>" % repr(self.addr)
    def send( self, string ):
        """send all data, connecting first if necessary"""
        if self.s is None:
            self.connect()
        self.log( "--> "+repr(string) )
        self.s.sendall( string )
    def recv( self ):
        """receive data, connecting first if necessary.

        Keeps reading while the peer has more data pending; stops when the
        peer closes (empty read) or nothing arrives within 0.1s.
        """
        if self.s is None:
            self.connect()
        data = self.s.recv( self.limit )
        while( select.select((self.s,), (), (), 0.1)[0] ):  #TODO: this select is probably not necessary, because server closes this socket
            d = self.s.recv( self.limit )
            if len(d) == 0:
                break
            data += d
        self.log( "<-- "+repr(data) )
        return data
    def sendrecv( self, string ):
        """send data + receive data + close"""
        try:
            self.send( string )
            return self.recv()
        finally:
            self.close()
    def serve(self, handler, n=None):
        """open socket, wait for incoming connections and handle them.

        :Parameters:
            - handler: called as result = handler(data); a None-result
              (e.g. for notifications) means "send nothing back"
            - n: serve n requests, None=forever
        """
        self.close()
        self.s = socket.socket( self.s_type, self.s_prot )
        try:
            self.log( "listen %s" % repr(self.addr) )
            self.s.bind( self.addr )
            self.s.listen(1)
            n_current = 0
            while 1:
                if n is not None and n_current >= n:
                    break
                conn, addr = self.s.accept()
                self.log( "%s connected" % repr(addr) )
                data = conn.recv(self.limit)
                self.log( "%s --> %s" % (repr(addr), repr(data)) )
                result = handler(data)
                #BUGFIX: was `if data is not None:`, which is always true here
                #and made serve() call conn.send(None) (and crash) whenever the
                #handler returned None. The *handler result* must be checked.
                if result is not None:
                    self.log( "%s <-- %s" % (repr(addr), repr(result)) )
                    conn.send( result )
                self.log( "%s close" % repr(addr) )
                conn.close()
                n_current += 1
        finally:
            self.close()
if hasattr(socket, 'AF_UNIX'):
    class TransportUnixSocket(TransportSocket):
        """Transport via Unix Domain Socket (only where the platform supports it)."""
        def __init__(self, addr=None, limit=44096, timeout=500.0, logfunc=log_dummy):
            """
            :Parameters:
                - addr: "socket_file"
            :Note: | The socket-file is not deleted.
                   | If the socket-file begins with \x00, abstract sockets are used,
                     and no socket-file is created.
            :SeeAlso: TransportSocket
            """
            TransportSocket.__init__( self, addr, limit=limit,
                                      sock_type=socket.AF_UNIX,
                                      sock_prot=socket.SOCK_STREAM,
                                      timeout=timeout, logfunc=logfunc )
class TransportTcpIp(TransportSocket):
    """Transport via TCP/IP."""
    def __init__(self, addr=None, limit=44096, timeout=500.0, logfunc=log_dummy):
        """
        :Parameters:
            - addr: ("host", port)
        :SeeAlso: TransportSocket
        """
        TransportSocket.__init__( self, addr, limit,
                                  socket.AF_INET, socket.SOCK_STREAM,
                                  timeout, logfunc )
#=========================================
# client side: server proxy
class ServerProxy:
"""RPC-client: server proxy
A logical connection to a RPC server.
It works with different data/serializers and different transports.
Notifications and id-handling/multicall are not yet implemented.
:Example:
see module-docstring
:TODO: verbose/logging?
"""
def __init__( self, data_serializer, transport ):
"""
:Parameters:
- data_serializer: a data_structure+serializer-instance
- transport: a Transport instance
"""
#TODO: check parameters
self.__data_serializer = data_serializer
if not isinstance(transport, Transport):
raise ValueError('invalid "transport" (must be a Transport-instance)"')
self.__transport = transport
def __str__(self):
return repr(self)
def __repr__(self):
return "<ServerProxy for %s, with serializer %s>" % (self.__transport, self.__data_serializer)
def __req( self, methodname, args=None, kwargs=None, id=0 ):
# JSON-RPC 1.0: only positional parameters
if len(kwargs) > 0 and isinstance(self.data_serializer, JsonRpc10):
raise ValueError("Only positional parameters allowed in JSON-RPC 1.0")
# JSON-RPC 2.0: only args OR kwargs allowed!
if len(args) > 0 and len(kwargs) > 0:
raise ValueError("Only positional or named parameters are allowed!")
if len(kwargs) == 0:
req_str = self.__data_serializer.dumps_request( methodname, args, id )
else:
req_str = self.__data_serializer.dumps_request( methodname, kwargs, id )
try:
resp_str = self.__transport.sendrecv( req_str )
except Exception,err:
raise RPCTransportError(err)
resp = self.__data_serializer.loads_response( resp_str )
return resp[0]
def __getattr__(self, name):
# magic method dispatcher
# note: to call a remote object with an non-standard name, use
# result getattr(my_server_proxy, "strange-python-name")(args)
return _method(self.__req, name)
# request dispatcher
class _method:
    """some "magic" to bind an RPC method to an RPC server.

    Attribute access builds "nested" method names
    (e.g. examples.getStateName); calling the object issues the request.

    :Raises: AttributeError for method-names/attributes beginning with '_'.
    """
    def __init__(self, req, name):
        if name[0] == "_":          #keep proxy._*-attributes from becoming rpc-calls
            raise AttributeError("invalid attribute '%s'" % name)
        self.__request = req
        self.__rpcname = name
    def __getattr__(self, name):
        if name[0] == "_":          #keep proxy._*-attributes from becoming rpc-calls
            raise AttributeError("invalid attribute '%s'" % name)
        return _method(self.__request, "%s.%s" % (self.__rpcname, name))
    def __call__(self, *args, **kwargs):
        return self.__request(self.__rpcname, args, kwargs)
#=========================================
# server side: Server
class Server:
"""RPC-server.
It works with different data/serializers and
with different transports.
:Example:
see module-docstring
:TODO:
- mixed JSON-RPC 1.0/2.0 server?
- logging/loglevels?
"""
def __init__( self, data_serializer, transport, logfile=None ):
"""
:Parameters:
- data_serializer: a data_structure+serializer-instance
- transport: a Transport instance
- logfile: file to log ("unexpected") errors to
"""
#TODO: check parameters
self.__data_serializer = data_serializer
if not isinstance(transport, Transport):
raise ValueError('invalid "transport" (must be a Transport-instance)"')
self.__transport = transport
self.logfile = logfile
if self.logfile is not None: #create logfile (or raise exception)
f = codecs.open( self.logfile, 'a', encoding='utf-8' )
f.close()
self.funcs = {}
def __repr__(self):
return "<Server for %s, with serializer %s>" % (self.__transport, self.__data_serializer)
def log(self, message):
"""write a message to the logfile (in utf-8)"""
if self.logfile is not None:
f = codecs.open( self.logfile, 'a', encoding='utf-8' )
f.write( time.strftime("%Y-%m-%d %H:%M:%S ")+message+"\n" )
f.close()
def register_instance(self, myinst, name=None):
"""Add all functions of a class-instance to the RPC-services.
All entries of the instance which do not begin with '_' are added.
:Parameters:
- myinst: class-instance containing the functions
- name: | hierarchical prefix.
| If omitted, the functions are added directly.
| If given, the functions are added as "name.function".
:TODO:
- only add functions and omit attributes?
- improve hierarchy?
"""
for e in dir(myinst):
if e[0][0] != "_":
if name is None:
self.register_function( getattr(myinst, e) )
else:
self.register_function( getattr(myinst, e), name="%s.%s" % (name, e) )
def register_function(self, function, name=None):
"""Add a function to the RPC-services.
:Parameters:
- function: function to add
- name: RPC-name for the function. If omitted/None, the original
name of the function is used.
"""
if name is None:
self.funcs[function.__name__] = function
else:
self.funcs[name] = function
def handle(self, rpcstr):
"""Handle a RPC-Request.
:Parameters:
- rpcstr: the received rpc-string
:Returns: the data to send back or None if nothing should be sent back
:Raises: RPCFault (and maybe others)
"""
#TODO: id
notification = False
try:
req = self.__data_serializer.loads_request( rpcstr )
if len(req) == 2: #notification
method, params = req
notification = True
else: #request
method, params, id = req
except RPCFault, err:
return self.__data_serializer.dumps_error( err, id=None )
except Exception, err:
self.log( "%d (%s): %s" % (INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR], str(err)) )
return self.__data_serializer.dumps_error( RPCFault(INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR]), id=None )
if method not in self.funcs:
if notification:
return None
return self.__data_serializer.dumps_error( RPCFault(METHOD_NOT_FOUND, ERROR_MESSAGE[METHOD_NOT_FOUND]), id )
try:
if isinstance(params, dict):
result = self.funcs[method]( **params )
else:
result = self.funcs[method]( *params )
except RPCFault, err:
if notification:
return None
return self.__data_serializer.dumps_error( err, id=None )
except Exception, err:
if notification:
return None
self.log( "%d (%s): %s" % (INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR], str(err)) )
return self.__data_serializer.dumps_error( RPCFault(INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR]), id )
if notification:
return None
try:
return self.__data_serializer.dumps_response( result, id )
except Exception, err:
self.log( "%d (%s): %s" % (INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR], str(err)) )
return self.__data_serializer.dumps_error( RPCFault(INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR]), id )
def serve(self, n=None):
"""serve (forever or for n communicaions).
:See: Transport
"""
self.__transport.serve( self.handle, n )
#=========================================
| 40.339518 | 139 | 0.577128 |
"""
JSON-RPC (remote procedure call).
It consists of 3 (independent) parts:
- proxy/dispatcher
- data structure / serializer
- transport
It's intended for JSON-RPC, but since the above 3 parts are independent,
it could be used for other RPCs as well.
Currently, JSON-RPC 2.0(pre) and JSON-RPC 1.0 are implemented
:Version: 2008-08-31-beta
:Status: experimental
:Example:
simple Client with JsonRPC2.0 and TCP/IP::
>>> proxy = ServerProxy( JsonRpc20(), TransportTcpIp(addr=("127.0.0.1",31415)) )
>>> proxy.echo( "hello world" )
u'hello world'
>>> proxy.echo( "bye." )
u'bye.'
simple Server with JsonRPC2.0 and TCP/IP with logging to STDOUT::
>>> server = Server( JsonRpc20(), TransportTcpIp(addr=("127.0.0.1",31415), logfunc=log_stdout) )
>>> def echo( s ):
... return s
>>> server.register_function( echo )
>>> server.serve( 2 ) # serve 2 requests # doctest: +ELLIPSIS
listen ('127.0.0.1', 31415)
('127.0.0.1', ...) connected
('127.0.0.1', ...) <-- {"jsonrpc": "2.0", "method": "echo", "params": ["hello world"], "id": 0}
('127.0.0.1', ...) --> {"jsonrpc": "2.0", "result": "hello world", "id": 0}
('127.0.0.1', ...) close
('127.0.0.1', ...) connected
('127.0.0.1', ...) <-- {"jsonrpc": "2.0", "method": "echo", "params": ["bye."], "id": 0}
('127.0.0.1', ...) --> {"jsonrpc": "2.0", "result": "bye.", "id": 0}
('127.0.0.1', ...) close
close ('127.0.0.1', 31415)
Client with JsonRPC2.0 and an abstract Unix Domain Socket::
>>> proxy = ServerProxy( JsonRpc20(), TransportUnixSocket(addr="\\x00.rpcsocket") )
>>> proxy.hi( message="hello" ) #named parameters
u'hi there'
>>> proxy.test() #fault
Traceback (most recent call last):
...
jsonrpc.RPCMethodNotFound: <RPCFault -32601: u'Method not found.' (None)>
>>> proxy.debug.echo( "hello world" ) #hierarchical procedures
u'hello world'
Server with JsonRPC2.0 and abstract Unix Domain Socket with a logfile::
>>> server = Server( JsonRpc20(), TransportUnixSocket(addr="\\x00.rpcsocket", logfunc=log_file("mylog.txt")) )
>>> def echo( s ):
... return s
>>> def hi( message ):
... return "hi there"
>>> server.register_function( hi )
>>> server.register_function( echo, name="debug.echo" )
>>> server.serve( 3 ) # serve 3 requests
"mylog.txt" then contains:
listen '\\x00.rpcsocket'
'' connected
'' --> '{"jsonrpc": "2.0", "method": "hi", "params": {"message": "hello"}, "id": 0}'
'' <-- '{"jsonrpc": "2.0", "result": "hi there", "id": 0}'
'' close
'' connected
'' --> '{"jsonrpc": "2.0", "method": "test", "id": 0}'
'' <-- '{"jsonrpc": "2.0", "error": {"code":-32601, "message": "Method not found."}, "id": 0}'
'' close
'' connected
'' --> '{"jsonrpc": "2.0", "method": "debug.echo", "params": ["hello world"], "id": 0}'
'' <-- '{"jsonrpc": "2.0", "result": "hello world", "id": 0}'
'' close
close '\\x00.rpcsocket'
:Note: all exceptions derived from RPCFault are propagated to the client.
other exceptions are logged and result in a sent-back "empty" INTERNAL_ERROR.
:Uses: simplejson, socket, sys,time,codecs
:SeeAlso: JSON-RPC 2.0 proposal, 1.0 specification
:Warning:
.. Warning::
This is **experimental** code!
:Bug:
:Author: Roland Koebler (rk(at)simple-is-better.org)
:Copyright: 2007-2008 by Roland Koebler (rk(at)simple-is-better.org)
:License: see __license__
:Changelog:
- 2008-08-31: 1st release
TODO:
- server: multithreading rpc-server
- client: multicall (send several requests)
- transport: SSL sockets, maybe HTTP, HTTPS
- types: support for date/time (ISO 8601)
- errors: maybe customizable error-codes/exceptions
- mixed 1.0/2.0 server ?
- system description etc. ?
- maybe test other json-serializers, like cjson?
"""
__version__ = "2008-08-31-beta"
__author__ = "Roland Koebler <rk(at)simple-is-better.org>"
__license__ = """Copyright (c) 2007-2008 by Roland Koebler (rk(at)simple-is-better.org)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
#=========================================
#import
import sys
try:
import json
except ImportError:
import simplejson as json
#=========================================
# errors
#----------------------
# error-codes + exceptions
#JSON-RPC 2.0 error-codes
#JSON-RPC 2.0 error-codes (reused by the 1.0 serializer, since 1.0 defines no codes)
PARSE_ERROR = -32700
INVALID_REQUEST = -32600
METHOD_NOT_FOUND = -32601
INVALID_METHOD_PARAMS = -32602 #invalid number/type of parameters
INTERNAL_ERROR = -32603 #"all other errors"
#additional error-codes (implementation-defined range)
PROCEDURE_EXCEPTION = -32000
AUTHENTIFICATION_ERROR = -32001
PERMISSION_DENIED = -32002
INVALID_PARAM_VALUES = -32003
#human-readable default messages, keyed by error-code (used as fault texts)
ERROR_MESSAGE = {
          PARSE_ERROR : "Parse error.",
          INVALID_REQUEST : "Invalid Request.",
          METHOD_NOT_FOUND : "Method not found.",
          INVALID_METHOD_PARAMS : "Invalid parameters.",
          INTERNAL_ERROR : "Internal error.",
          PROCEDURE_EXCEPTION : "Procedure exception.",
          AUTHENTIFICATION_ERROR : "Authentification error.",
          PERMISSION_DENIED : "Permission denied.",
          INVALID_PARAM_VALUES: "Invalid parameter values."
          }
#----------------------
# exceptions
class RPCError(Exception):
    """Common base class for all rpc-errors raised by this module."""
    pass
class RPCTransportError(RPCError):
    """Transport error (raised when sending/receiving over the transport fails)."""
class RPCTimeoutError(RPCTransportError):
    """Transport/reply timeout (no answer received in time)."""
class RPCFault(RPCError):
    """RPC error/fault package received.

    This exception can also be instantiated directly to generate a
    RPC-error/fault message.

    :Variables:
        - error_code: the RPC error-code
        - error_message: description of the error
        - error_data: optional additional information
          (must be json-serializable)
    :TODO: improve __str__
    """
    def __init__(self, error_code, error_message, error_data=None):
        RPCError.__init__(self)
        self.error_data = error_data
        self.error_message = error_message
        self.error_code = error_code
    def __str__(self):
        return repr(self)
    def __repr__(self):
        return "<RPCFault %s: %r (%r)>" % (self.error_code, self.error_message, self.error_data)
class RPCParseError(RPCFault):
    """Broken rpc-package received (PARSE_ERROR)."""
    def __init__(self, error_data=None):
        code = PARSE_ERROR
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
class RPCInvalidRPC(RPCFault):
    """Invalid rpc-package received (INVALID_REQUEST)."""
    def __init__(self, error_data=None):
        code = INVALID_REQUEST
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
class RPCMethodNotFound(RPCFault):
    """Requested method does not exist (METHOD_NOT_FOUND)."""
    def __init__(self, error_data=None):
        code = METHOD_NOT_FOUND
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
class RPCInvalidMethodParams(RPCFault):
    """Invalid method-parameters (INVALID_METHOD_PARAMS)."""
    def __init__(self, error_data=None):
        code = INVALID_METHOD_PARAMS
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
class RPCInternalError(RPCFault):
    """Internal error on the peer (INTERNAL_ERROR)."""
    def __init__(self, error_data=None):
        code = INTERNAL_ERROR
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
class RPCProcedureException(RPCFault):
    """Exception raised inside the remote procedure (PROCEDURE_EXCEPTION)."""
    def __init__(self, error_data=None):
        code = PROCEDURE_EXCEPTION
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
class RPCAuthentificationError(RPCFault):
    """Authentification failed (AUTHENTIFICATION_ERROR)."""
    def __init__(self, error_data=None):
        code = AUTHENTIFICATION_ERROR
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
class RPCPermissionDenied(RPCFault):
    """Access denied (PERMISSION_DENIED)."""
    def __init__(self, error_data=None):
        code = PERMISSION_DENIED
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
class RPCInvalidParamValues(RPCFault):
    """Parameter values are invalid (INVALID_PARAM_VALUES)."""
    def __init__(self, error_data=None):
        code = INVALID_PARAM_VALUES
        RPCFault.__init__(self, code, ERROR_MESSAGE[code], error_data)
#=========================================
# data structure / serializer
#----------------------
#
def dictkeyclean(d):
    """Convert all keys of the dict 'd' to (ascii-)strings.

    :Returns: a new dict with converted keys (values are kept as-is)
    :Raises: UnicodeEncodeError if a key cannot be converted (Python 2)
    """
    #portability fix: d.items() behaves identically to the former
    #d.iteritems() here (iteritems() no longer exists in Python 3).
    return dict( (str(k), v) for (k, v) in d.items() )
#----------------------
# JSON-RPC 1.0
class JsonRpc10:
    """JSON-RPC V1.0 data-structure / serializer

    Serializes requests/responses to JSON-strings and parses them back,
    raising RPCFault-derivates for received error-responses.
    This implementation is quite liberal in what it accepts: It treats
    missing "params" and "id" in Requests and missing "result"/"error" in
    Responses as empty/null.

    :SeeAlso: JSON-RPC 1.0 specification
    :TODO: catch simplejson.dumps not-serializable-exceptions
    """
    def __init__(self, dumps=json.dumps, loads=json.loads):
        """init: set serializer to use

        :Parameters:
            - dumps: json-encoder-function
            - loads: json-decoder-function
        :Note: The dumps_* functions of this class already directly create
               the invariant parts of the resulting json-object themselves,
               without using the given json-encoder-function.
        """
        self.dumps = dumps
        self.loads = loads
    def dumps_request( self, method, params=(), id=0 ):
        """serialize JSON-RPC-Request

        :Parameters:
            - method: the method-name (str/unicode)
            - params: the parameters (list/tuple)
            - id: if id=None, this results in a Notification
        :Returns: | {"method": "...", "params": ..., "id": ...}
                  | "method", "params" and "id" are always in this order.
        :Raises: TypeError if method/params is of wrong type or
                 not JSON-serializable
        """
        if not isinstance(method, (str, unicode)):
            raise TypeError('"method" must be a string (or unicode string).')
        if not isinstance(params, (tuple, list)):
            raise TypeError("params must be a tuple/list.")
        #the envelope is built by hand to guarantee the field-order
        return '{"method": %s, "params": %s, "id": %s}' % \
                (self.dumps(method), self.dumps(params), self.dumps(id))
    def dumps_notification( self, method, params=() ):
        """serialize a JSON-RPC-Notification

        In JSON-RPC 1.0 a Notification is a Request with "id": null.

        :Parameters: see dumps_request
        :Returns: | {"method": "...", "params": ..., "id": null}
                  | "method", "params" and "id" are always in this order.
        :Raises: see dumps_request
        """
        if not isinstance(method, (str, unicode)):
            raise TypeError('"method" must be a string (or unicode string).')
        if not isinstance(params, (tuple, list)):
            raise TypeError("params must be a tuple/list.")
        return '{"method": %s, "params": %s, "id": null}' % \
                (self.dumps(method), self.dumps(params))
    def dumps_response( self, result, id=None ):
        """serialize a JSON-RPC-Response (without error)

        :Returns: | {"result": ..., "error": null, "id": ...}
                  | "result", "error" and "id" are always in this order.
        :Raises: TypeError if not JSON-serializable
        """
        return '{"result": %s, "error": null, "id": %s}' % \
                (self.dumps(result), self.dumps(id))
    def dumps_error( self, error, id=None ):
        """serialize a JSON-RPC-Response-error

        Since JSON-RPC 1.0 does not define an error-object, this uses the
        JSON-RPC 2.0 error-object.

        :Parameters:
            - error: a RPCFault instance
        :Returns: | {"result": null, "error": {"code": error_code, "message": error_message, "data": error_data}, "id": ...}
                  | "result", "error" and "id" are always in this order, data is omitted if None.
        :Raises: ValueError if error is not a RPCFault instance,
                 TypeError if not JSON-serializable
        """
        if not isinstance(error, RPCFault):
            raise ValueError("""error must be a RPCFault-instance.""")
        if error.error_data is None:
            return '{"result": null, "error": {"code":%s, "message": %s}, "id": %s}' % \
                    (self.dumps(error.error_code), self.dumps(error.error_message), self.dumps(id))
        else:
            return '{"result": null, "error": {"code":%s, "message": %s, "data": %s}, "id": %s}' % \
                    (self.dumps(error.error_code), self.dumps(error.error_message), self.dumps(error.error_data), self.dumps(id))
    def loads_request( self, string ):
        """de-serialize a JSON-RPC Request/Notification

        :Returns: | [method_name, params, id] or [method_name, params]
                  | params is a tuple/list
                  | if id is missing, this is a Notification
        :Raises: RPCParseError, RPCInvalidRPC, RPCInvalidMethodParams
        """
        try:
            data = self.loads(string)
        except ValueError, err:
            raise RPCParseError("No valid JSON. (%s)" % str(err))
        if not isinstance(data, dict): raise RPCInvalidRPC("No valid RPC-package.")
        if "method" not in data: raise RPCInvalidRPC("""Invalid Request, "method" is missing.""")
        if not isinstance(data["method"], (str, unicode)):
            raise RPCInvalidRPC("""Invalid Request, "method" must be a string.""")
        if "id" not in data: data["id"] = None #be liberal
        if "params" not in data: data["params"] = () #be liberal
        if not isinstance(data["params"], (list, tuple)):
            raise RPCInvalidRPC("""Invalid Request, "params" must be an array.""")
        #after the defaults above, exactly method+params+id must remain
        if len(data) != 3: raise RPCInvalidRPC("""Invalid Request, additional fields found.""")
        # notification / request
        if data["id"] is None:
            return data["method"], data["params"] #notification
        else:
            return data["method"], data["params"], data["id"] #request
    def loads_response( self, string ):
        """de-serialize a JSON-RPC Response/error

        :Returns: | [result, id] for Responses
        :Raises: | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC
                 | Note that for error-packages which do not match the
                   V2.0-definition, RPCFault(-1, "Error", RECEIVED_ERROR_OBJ)
                   is raised.
        """
        try:
            data = self.loads(string)
        except ValueError, err:
            raise RPCParseError("No valid JSON. (%s)" % str(err))
        if not isinstance(data, dict): raise RPCInvalidRPC("No valid RPC-package.")
        if "id" not in data: raise RPCInvalidRPC("""Invalid Response, "id" missing.""")
        if "result" not in data: data["result"] = None #be liberal
        if "error" not in data: data["error"] = None #be liberal
        if len(data) != 3: raise RPCInvalidRPC("""Invalid Response, additional or missing fields.""")
        #error
        if data["error"] is not None:
            if data["result"] is not None:
                raise RPCInvalidRPC("""Invalid Response, one of "result" or "error" must be null.""")
            #v2.0 error-format: {"code": ..., "message": ..., ["data": ...]}
            if( isinstance(data["error"], dict) and "code" in data["error"] and "message" in data["error"] and
                (len(data["error"])==2 or ("data" in data["error"] and len(data["error"])==3)) ):
                if "data" not in data["error"]:
                    error_data = None
                else:
                    error_data = data["error"]["data"]
                #map the received error-code to the matching exception-class
                if data["error"]["code"] == PARSE_ERROR:
                    raise RPCParseError(error_data)
                elif data["error"]["code"] == INVALID_REQUEST:
                    raise RPCInvalidRPC(error_data)
                elif data["error"]["code"] == METHOD_NOT_FOUND:
                    raise RPCMethodNotFound(error_data)
                elif data["error"]["code"] == INVALID_METHOD_PARAMS:
                    raise RPCInvalidMethodParams(error_data)
                elif data["error"]["code"] == INTERNAL_ERROR:
                    raise RPCInternalError(error_data)
                elif data["error"]["code"] == PROCEDURE_EXCEPTION:
                    raise RPCProcedureException(error_data)
                elif data["error"]["code"] == AUTHENTIFICATION_ERROR:
                    raise RPCAuthentificationError(error_data)
                elif data["error"]["code"] == PERMISSION_DENIED:
                    raise RPCPermissionDenied(error_data)
                elif data["error"]["code"] == INVALID_PARAM_VALUES:
                    raise RPCInvalidParamValues(error_data)
                else:
                    raise RPCFault(data["error"]["code"], data["error"]["message"], error_data)
            #other error-format: wrap the whole received error-object
            else:
                raise RPCFault(-1, "Error", data["error"])
        #result
        else:
            return data["result"], data["id"]
#----------------------
# JSON-RPC 2.0
class JsonRpc20:
    """JSON-RPC V2.0 data-structure / serializer

    Serializes requests/responses to JSON-strings and parses them back,
    raising RPCFault-derivates for received error-responses.

    :SeeAlso: JSON-RPC 2.0 specification
    :TODO: catch simplejson.dumps not-serializable-exceptions
    """
    def __init__(self, dumps=json.dumps, loads=json.loads):
        """init: set serializer to use

        :Parameters:
            - dumps: json-encoder-function
            - loads: json-decoder-function
        :Note: The dumps_* functions of this class already directly create
               the invariant parts of the resulting json-object themselves,
               without using the given json-encoder-function.
        """
        self.dumps = dumps
        self.loads = loads
    def dumps_request( self, method, params=(), id=0 ):
        """serialize JSON-RPC-Request

        :Parameters:
            - method: the method-name (str/unicode)
            - params: the parameters (list/tuple/dict)
            - id: the id (should not be None)
        :Returns: | {"jsonrpc": "2.0", "method": "...", "params": ..., "id": ...}
                  | "jsonrpc", "method", "params" and "id" are always in this order.
                  | "params" is omitted if empty
        :Raises: TypeError if method/params is of wrong type or
                 not JSON-serializable
        """
        if not isinstance(method, (str, unicode)):
            raise TypeError('"method" must be a string (or unicode string).')
        if not isinstance(params, (tuple, list, dict)):
            raise TypeError("params must be a tuple/list/dict or None.")
        #the envelope is built by hand to guarantee the field-order
        if params:
            return '{"jsonrpc": "2.0", "method": %s, "params": %s, "id": %s}' % \
                    (self.dumps(method), self.dumps(params), self.dumps(id))
        else:
            return '{"jsonrpc": "2.0", "method": %s, "id": %s}' % \
                    (self.dumps(method), self.dumps(id))
    def dumps_notification( self, method, params=() ):
        """serialize a JSON-RPC-Notification

        In JSON-RPC 2.0 a Notification is a Request without "id".

        :Parameters: see dumps_request
        :Returns: | {"jsonrpc": "2.0", "method": "...", "params": ...}
                  | "jsonrpc", "method" and "params" are always in this order.
        :Raises: see dumps_request
        """
        if not isinstance(method, (str, unicode)):
            raise TypeError('"method" must be a string (or unicode string).')
        if not isinstance(params, (tuple, list, dict)):
            raise TypeError("params must be a tuple/list/dict or None.")
        if params:
            return '{"jsonrpc": "2.0", "method": %s, "params": %s}' % \
                    (self.dumps(method), self.dumps(params))
        else:
            return '{"jsonrpc": "2.0", "method": %s}' % \
                    (self.dumps(method))
    def dumps_response( self, result, id=None ):
        """serialize a JSON-RPC-Response (without error)

        :Returns: | {"jsonrpc": "2.0", "result": ..., "id": ...}
                  | "jsonrpc", "result", and "id" are always in this order.
        :Raises: TypeError if not JSON-serializable
        """
        return '{"jsonrpc": "2.0", "result": %s, "id": %s}' % \
                (self.dumps(result), self.dumps(id))
    def dumps_error( self, error, id=None ):
        """serialize a JSON-RPC-Response-error

        :Parameters:
            - error: a RPCFault instance
        :Returns: | {"jsonrpc": "2.0", "error": {"code": error_code, "message": error_message, "data": error_data}, "id": ...}
                  | "jsonrpc", "result", "error" and "id" are always in this order, data is omitted if None.
        :Raises: ValueError if error is not a RPCFault instance,
                 TypeError if not JSON-serializable
        """
        if not isinstance(error, RPCFault):
            raise ValueError("""error must be a RPCFault-instance.""")
        if error.error_data is None:
            return '{"jsonrpc": "2.0", "error": {"code":%s, "message": %s}, "id": %s}' % \
                    (self.dumps(error.error_code), self.dumps(error.error_message), self.dumps(id))
        else:
            return '{"jsonrpc": "2.0", "error": {"code":%s, "message": %s, "data": %s}, "id": %s}' % \
                    (self.dumps(error.error_code), self.dumps(error.error_message), self.dumps(error.error_data), self.dumps(id))
    def loads_request( self, string ):
        """de-serialize a JSON-RPC Request/Notification

        :Returns: | [method_name, params, id] or [method_name, params]
                  | params is a tuple/list or dict (with only str-keys)
                  | if id is missing, this is a Notification
        :Raises: RPCParseError, RPCInvalidRPC, RPCInvalidMethodParams
        """
        try:
            data = self.loads(string)
        except ValueError, err:
            raise RPCParseError("No valid JSON. (%s)" % str(err))
        if not isinstance(data, dict): raise RPCInvalidRPC("No valid RPC-package.")
        if "jsonrpc" not in data: raise RPCInvalidRPC("""Invalid Response, "jsonrpc" missing.""")
        if not isinstance(data["jsonrpc"], (str, unicode)):
            raise RPCInvalidRPC("""Invalid Response, "jsonrpc" must be a string.""")
        if data["jsonrpc"] != "2.0": raise RPCInvalidRPC("""Invalid jsonrpc version.""")
        if "method" not in data: raise RPCInvalidRPC("""Invalid Request, "method" is missing.""")
        if not isinstance(data["method"], (str, unicode)):
            raise RPCInvalidRPC("""Invalid Request, "method" must be a string.""")
        if "params" not in data: data["params"] = ()
        #convert params-keys from unicode to str
        elif isinstance(data["params"], dict):
            try:
                data["params"] = dictkeyclean(data["params"])
            except UnicodeEncodeError:
                raise RPCInvalidMethodParams("Parameter-names must be in ascii.")
        elif not isinstance(data["params"], (list, tuple)):
            raise RPCInvalidRPC("""Invalid Request, "params" must be an array or object.""")
        #jsonrpc+method+params (+optional id) must be the only fields
        if not( len(data)==3 or ("id" in data and len(data)==4) ):
            raise RPCInvalidRPC("""Invalid Request, additional fields found.""")
        # notification / request
        if "id" not in data:
            return data["method"], data["params"] #notification
        else:
            return data["method"], data["params"], data["id"] #request
    def loads_response( self, string ):
        """de-serialize a JSON-RPC Response/error

        :Returns: | [result, id] for Responses
        :Raises: | RPCFault+derivates for error-packages/faults, RPCParseError, RPCInvalidRPC
        """
        try:
            data = self.loads(string)
        except ValueError, err:
            raise RPCParseError("No valid JSON. (%s)" % str(err))
        if not isinstance(data, dict): raise RPCInvalidRPC("No valid RPC-package.")
        if "jsonrpc" not in data: raise RPCInvalidRPC("""Invalid Response, "jsonrpc" missing.""")
        if not isinstance(data["jsonrpc"], (str, unicode)):
            raise RPCInvalidRPC("""Invalid Response, "jsonrpc" must be a string.""")
        if data["jsonrpc"] != "2.0": raise RPCInvalidRPC("""Invalid jsonrpc version.""")
        if "id" not in data: raise RPCInvalidRPC("""Invalid Response, "id" missing.""")
        if "result" not in data: data["result"] = None #be liberal
        if "error" not in data: data["error"] = None #be liberal
        if len(data) != 4: raise RPCInvalidRPC("""Invalid Response, additional or missing fields.""")
        #error
        if data["error"] is not None:
            if data["result"] is not None:
                raise RPCInvalidRPC("""Invalid Response, only "result" OR "error" allowed.""")
            if not isinstance(data["error"], dict): raise RPCInvalidRPC("Invalid Response, invalid error-object.")
            if "code" not in data["error"] or "message" not in data["error"]:
                raise RPCInvalidRPC("Invalid Response, invalid error-object.")
            if "data" not in data["error"]: data["error"]["data"] = None #be liberal
            if len(data["error"]) != 3:
                raise RPCInvalidRPC("Invalid Response, invalid error-object.")
            error_data = data["error"]["data"]
            #map the received error-code to the matching exception-class
            if data["error"]["code"] == PARSE_ERROR:
                raise RPCParseError(error_data)
            elif data["error"]["code"] == INVALID_REQUEST:
                raise RPCInvalidRPC(error_data)
            elif data["error"]["code"] == METHOD_NOT_FOUND:
                raise RPCMethodNotFound(error_data)
            elif data["error"]["code"] == INVALID_METHOD_PARAMS:
                raise RPCInvalidMethodParams(error_data)
            elif data["error"]["code"] == INTERNAL_ERROR:
                raise RPCInternalError(error_data)
            elif data["error"]["code"] == PROCEDURE_EXCEPTION:
                raise RPCProcedureException(error_data)
            elif data["error"]["code"] == AUTHENTIFICATION_ERROR:
                raise RPCAuthentificationError(error_data)
            elif data["error"]["code"] == PERMISSION_DENIED:
                raise RPCPermissionDenied(error_data)
            elif data["error"]["code"] == INVALID_PARAM_VALUES:
                raise RPCInvalidParamValues(error_data)
            else:
                raise RPCFault(data["error"]["code"], data["error"]["message"], error_data)
        #result
        else:
            return data["result"], data["id"]
#=========================================
# transports
#----------------------
# transport-logging
import codecs
import time
def log_dummy( message ):
    """No-op logger: silently discard the message."""
    return None
def log_stdout( message ):
    """Logger that writes the message to STDOUT."""
    print(message)
def log_file( filename ):
    """Build and return a logging function which appends each message
    to *filename* (utf-8)."""
    def logfile( message ):
        out = codecs.open( filename, 'a', encoding='utf-8' )
        out.write( message+"\n" )
        out.close()
    return logfile
def log_filedate( filename ):
    """Build and return a logging function which appends a timestamp plus
    the message to *filename* (utf-8)."""
    def logfile( message ):
        out = codecs.open( filename, 'a', encoding='utf-8' )
        stamp = time.strftime("%Y-%m-%d %H:%M:%S ")
        out.write( stamp+message+"\n" )
        out.close()
    return logfile
#----------------------
class Transport:
    """Abstract transport interface.

    Subclasses implement :meth:`send` and :meth:`recv`; this base class
    builds :meth:`sendrecv` and :meth:`serve` on top of them.
    """
    def __init__(self):
        pass
    def send( self, data ):
        """Transmit all of *data*. Must be implemented by derived classes."""
        raise NotImplementedError
    def recv( self ):
        """Receive and return data. Must be implemented by derived classes."""
        raise NotImplementedError
    def sendrecv( self, string ):
        """Transmit *string*, then wait for and return the reply."""
        self.send( string )
        return self.recv()
    def serve( self, handler, n=None ):
        """Serve forever (or for *n* communications): recv -> handler -> send.

        The value of ``handler(data)`` is only sent back when it is not None.
        Serving can be stopped by SIGINT.

        :TODO:
            - how to stop?
              maybe use a .run-file, and stop server if file removed?
            - maybe make the exchange counter accessible? (e.g. for logging)
        """
        served = 0
        while n is None or served < n:
            incoming = self.recv()
            reply = handler(incoming)
            if reply is not None:
                self.send( reply )
            served += 1
class TransportSTDINOUT(Transport):
    """Debug transport: read requests from STDIN, write replies to STDOUT."""
    def send(self, string):
        """write data to STDOUT with '***SEND:' prefix"""
        print("***SEND:")
        print(string)
    def recv(self):
        """read data from STDIN (terminated by ^D)"""
        print("***RECV (please enter, ^D ends.):")
        return sys.stdin.read()
import socket, select
class TransportSocket(Transport):
    """Transport via socket.

    :SeeAlso:   python-module socket
    :TODO:
        - documentation
        - improve this (e.g. make sure that connections are closed, socket-files are deleted etc.)
        - exception-handling? (socket.error)
    """
    def __init__( self, addr, limit=44096, sock_type=socket.AF_INET, sock_prot=socket.SOCK_STREAM, timeout=500.0, logfunc=log_dummy ):
        """
        :Parameters:
            - addr:      socket-address
            - limit:     maximum number of bytes per recv() call
            - sock_type: socket address family
            - sock_prot: socket protocol type
            - timeout:   timeout in seconds
            - logfunc:   function for logging, logfunc(message)
        :Raises: socket.timeout after timeout
        """
        self.limit   = limit
        self.addr    = addr
        self.s_type  = sock_type
        self.s_prot  = sock_prot
        self.s       = None
        self.timeout = timeout
        self.log     = logfunc
    def connect( self ):
        """(Re-)open the connection to self.addr."""
        self.close()
        self.log( "connect to %s" % repr(self.addr) )
        self.s = socket.socket( self.s_type, self.s_prot )
        self.s.settimeout( self.timeout )
        self.s.connect( self.addr )
    def close( self ):
        """Close the connection if it is open."""
        if self.s is not None:
            self.log( "close %s" % repr(self.addr) )
            self.s.close()
            self.s = None
    def __repr__(self):
        return "<TransportSocket, %s>" % repr(self.addr)
    def send( self, string ):
        """Send all data, connecting first if necessary."""
        if self.s is None:
            self.connect()
        self.log( "--> "+repr(string) )
        self.s.sendall( string )
    def recv( self ):
        """Receive data, connecting first if necessary."""
        if self.s is None:
            self.connect()
        data = self.s.recv( self.limit )
        #TODO: this select is probably not necessary, because server closes this socket
        while( select.select((self.s,), (), (), 0.1)[0] ):
            d = self.s.recv( self.limit )
            if len(d) == 0:
                break
            data += d
        self.log( "<-- "+repr(data) )
        return data
    def sendrecv( self, string ):
        """send data + receive data + close"""
        try:
            self.send( string )
            return self.recv()
        finally:
            self.close()
    def serve(self, handler, n=None):
        """open socket, wait for incoming connections and handle them.

        :Parameters:
            - n: serve n requests, None=forever
        """
        self.close()
        self.s = socket.socket( self.s_type, self.s_prot )
        try:
            self.log( "listen %s" % repr(self.addr) )
            self.s.bind( self.addr )
            self.s.listen(1)
            n_current = 0
            while 1:
                if n is not None and n_current >= n:
                    break
                conn, addr = self.s.accept()
                self.log( "%s connected" % repr(addr) )
                data = conn.recv(self.limit)
                self.log( "%s --> %s" % (repr(addr), repr(data)) )
                result = handler(data)
                # BUGFIX: test the handler *result* (was: "data"), so that
                # notifications (handler returns None) do not crash in
                # conn.send(None) and nothing is sent back for them.
                if result is not None:
                    self.log( "%s <-- %s" % (repr(addr), repr(result)) )
                    conn.send( result )
                self.log( "%s close" % repr(addr) )
                conn.close()
                n_current += 1
        finally:
            self.close()
if hasattr(socket, 'AF_UNIX'):

    class TransportUnixSocket(TransportSocket):
        """Transport via a Unix Domain Socket (only defined where the
        platform supports AF_UNIX)."""
        def __init__(self, addr=None, limit=44096, timeout=500.0, logfunc=log_dummy):
            """
            :Parameters:
                - addr: "socket_file"
            :Note:  | The socket-file is not deleted.
                    | If the socket-file begins with \x00, abstract sockets are used,
                      and no socket-file is created.
            :SeeAlso:   TransportSocket
            """
            TransportSocket.__init__( self, addr, limit, socket.AF_UNIX, socket.SOCK_STREAM, timeout, logfunc )
class TransportTcpIp(TransportSocket):
    """Transport via TCP/IP."""
    def __init__(self, addr=None, limit=44096, timeout=500.0, logfunc=log_dummy):
        """
        :Parameters:
            - addr: ("host", port) pair
        :SeeAlso:   TransportSocket
        """
        TransportSocket.__init__( self, addr, limit, socket.AF_INET, socket.SOCK_STREAM, timeout, logfunc )
#=========================================
# client side: server proxy
class ServerProxy:
"""RPC-client: server proxy
A logical connection to a RPC server.
It works with different data/serializers and different transports.
Notifications and id-handling/multicall are not yet implemented.
:Example:
see module-docstring
:TODO: verbose/logging?
"""
def __init__( self, data_serializer, transport ):
"""
:Parameters:
- data_serializer: a data_structure+serializer-instance
- transport: a Transport instance
"""
#TODO: check parameters
self.__data_serializer = data_serializer
if not isinstance(transport, Transport):
raise ValueError('invalid "transport" (must be a Transport-instance)"')
self.__transport = transport
def __str__(self):
return repr(self)
def __repr__(self):
return "<ServerProxy for %s, with serializer %s>" % (self.__transport, self.__data_serializer)
def __req( self, methodname, args=None, kwargs=None, id=0 ):
# JSON-RPC 1.0: only positional parameters
if len(kwargs) > 0 and isinstance(self.data_serializer, JsonRpc10):
raise ValueError("Only positional parameters allowed in JSON-RPC 1.0")
# JSON-RPC 2.0: only args OR kwargs allowed!
if len(args) > 0 and len(kwargs) > 0:
raise ValueError("Only positional or named parameters are allowed!")
if len(kwargs) == 0:
req_str = self.__data_serializer.dumps_request( methodname, args, id )
else:
req_str = self.__data_serializer.dumps_request( methodname, kwargs, id )
try:
resp_str = self.__transport.sendrecv( req_str )
except Exception,err:
raise RPCTransportError(err)
resp = self.__data_serializer.loads_response( resp_str )
return resp[0]
def __getattr__(self, name):
# magic method dispatcher
# note: to call a remote object with an non-standard name, use
# result getattr(my_server_proxy, "strange-python-name")(args)
return _method(self.__req, name)
# request dispatcher
class _method:
"""some "magic" to bind an RPC method to an RPC server.
Supports "nested" methods (e.g. examples.getStateName).
:Raises: AttributeError for method-names/attributes beginning with '_'.
"""
def __init__(self, req, name):
if name[0] == "_": #prevent rpc-calls for proxy._*-functions
raise AttributeError("invalid attribute '%s'" % name)
self.__req = req
self.__name = name
def __getattr__(self, name):
if name[0] == "_": #prevent rpc-calls for proxy._*-functions
raise AttributeError("invalid attribute '%s'" % name)
return _method(self.__req, "%s.%s" % (self.__name, name))
def __call__(self, *args, **kwargs):
return self.__req(self.__name, args, kwargs)
#=========================================
# server side: Server
class Server:
"""RPC-server.
It works with different data/serializers and
with different transports.
:Example:
see module-docstring
:TODO:
- mixed JSON-RPC 1.0/2.0 server?
- logging/loglevels?
"""
def __init__( self, data_serializer, transport, logfile=None ):
"""
:Parameters:
- data_serializer: a data_structure+serializer-instance
- transport: a Transport instance
- logfile: file to log ("unexpected") errors to
"""
#TODO: check parameters
self.__data_serializer = data_serializer
if not isinstance(transport, Transport):
raise ValueError('invalid "transport" (must be a Transport-instance)"')
self.__transport = transport
self.logfile = logfile
if self.logfile is not None: #create logfile (or raise exception)
f = codecs.open( self.logfile, 'a', encoding='utf-8' )
f.close()
self.funcs = {}
def __repr__(self):
return "<Server for %s, with serializer %s>" % (self.__transport, self.__data_serializer)
def log(self, message):
"""write a message to the logfile (in utf-8)"""
if self.logfile is not None:
f = codecs.open( self.logfile, 'a', encoding='utf-8' )
f.write( time.strftime("%Y-%m-%d %H:%M:%S ")+message+"\n" )
f.close()
def register_instance(self, myinst, name=None):
"""Add all functions of a class-instance to the RPC-services.
All entries of the instance which do not begin with '_' are added.
:Parameters:
- myinst: class-instance containing the functions
- name: | hierarchical prefix.
| If omitted, the functions are added directly.
| If given, the functions are added as "name.function".
:TODO:
- only add functions and omit attributes?
- improve hierarchy?
"""
for e in dir(myinst):
if e[0][0] != "_":
if name is None:
self.register_function( getattr(myinst, e) )
else:
self.register_function( getattr(myinst, e), name="%s.%s" % (name, e) )
def register_function(self, function, name=None):
"""Add a function to the RPC-services.
:Parameters:
- function: function to add
- name: RPC-name for the function. If omitted/None, the original
name of the function is used.
"""
if name is None:
self.funcs[function.__name__] = function
else:
self.funcs[name] = function
def handle(self, rpcstr):
"""Handle a RPC-Request.
:Parameters:
- rpcstr: the received rpc-string
:Returns: the data to send back or None if nothing should be sent back
:Raises: RPCFault (and maybe others)
"""
#TODO: id
notification = False
try:
req = self.__data_serializer.loads_request( rpcstr )
if len(req) == 2: #notification
method, params = req
notification = True
else: #request
method, params, id = req
except RPCFault, err:
return self.__data_serializer.dumps_error( err, id=None )
except Exception, err:
self.log( "%d (%s): %s" % (INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR], str(err)) )
return self.__data_serializer.dumps_error( RPCFault(INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR]), id=None )
if method not in self.funcs:
if notification:
return None
return self.__data_serializer.dumps_error( RPCFault(METHOD_NOT_FOUND, ERROR_MESSAGE[METHOD_NOT_FOUND]), id )
try:
if isinstance(params, dict):
result = self.funcs[method]( **params )
else:
result = self.funcs[method]( *params )
except RPCFault, err:
if notification:
return None
return self.__data_serializer.dumps_error( err, id=None )
except Exception, err:
if notification:
return None
self.log( "%d (%s): %s" % (INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR], str(err)) )
return self.__data_serializer.dumps_error( RPCFault(INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR]), id )
if notification:
return None
try:
return self.__data_serializer.dumps_response( result, id )
except Exception, err:
self.log( "%d (%s): %s" % (INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR], str(err)) )
return self.__data_serializer.dumps_error( RPCFault(INTERNAL_ERROR, ERROR_MESSAGE[INTERNAL_ERROR]), id )
def serve(self, n=None):
"""serve (forever or for n communicaions).
:See: Transport
"""
self.__transport.serve( self.handle, n )
#=========================================
| false | true |
f731498360202437e8db6137f9cfaad521cd7f82 | 5,286 | py | Python | music_extractor.py | reeechart/ricommender | c5cdf1cb9db27b9fc4a2553aee2b705b9ad0b95a | [
"MIT"
] | null | null | null | music_extractor.py | reeechart/ricommender | c5cdf1cb9db27b9fc4a2553aee2b705b9ad0b95a | [
"MIT"
] | null | null | null | music_extractor.py | reeechart/ricommender | c5cdf1cb9db27b9fc4a2553aee2b705b9ad0b95a | [
"MIT"
] | null | null | null | import csv
import eyed3
import librosa
import numpy as np
import sys
def load_editorial_metadata(audiofile):
    '''Read the editorial (ID3) metadata of an mp3 file.

    Args:
        audiofile (string): path of the mp3 file to inspect.

    Returns:
        (title, artist, album) tuple of tag strings.
    '''
    tags = eyed3.load(audiofile).tag
    return tags.title, tags.artist, tags.album
def get_reformatted_music_file_directory(file):
    '''Reformat a backslash-separated path to "parent/filename" form.

    Args:
        file (string): audio file path using backslash separators.

    Returns:
        directory (string): the last two path components joined with '/'.
    '''
    components = file.split('\\')
    return '/'.join(components[-2:])
def extract_music_content(directory):
    '''Extract one row of audio features per mp3 file in *directory*.

    Args:
        directory (string): directory that contains the mp3 files.

    Returns:
        metadata ([list]): header row followed by one row per file with
        (id, file, title, artist, album, mfcc, zcr, tempo, pitch, chroma,
        num_frames).
    '''
    header = ['id', 'file', 'title', 'artist', 'album', 'mfcc', 'zcr', 'tempo', 'pitch', 'chroma', 'num_frames']
    all_metadata = [header]
    for idx, file in enumerate(librosa.util.find_files(directory, ext='mp3')):
        print('Extracting ', file, '...')
        title, artist, album = load_editorial_metadata(file)
        wf, sr = librosa.load(file)
        mfcc = librosa.feature.mfcc(y=wf, sr=sr)
        zcr = librosa.feature.zero_crossing_rate(y=wf)
        tempo = librosa.beat.tempo(y=wf, sr=sr)
        # Keep only the pitches whose magnitude exceeds the median, then
        # estimate the tuning offset from those dominant pitches.
        pitches, magnitudes = librosa.piptrack(y=wf, sr=sr)
        pitches = pitches[magnitudes > np.median(magnitudes)]
        pitch = librosa.pitch_tuning(pitches)
        chroma_stft = librosa.feature.chroma_stft(y=wf, sr=sr)
        row = [idx,
               get_reformatted_music_file_directory(file),
               title,
               artist,
               album,
               np.mean(mfcc),
               np.mean(zcr),
               tempo[0],
               pitch,
               np.mean(chroma_stft),
               len(mfcc[0])]    # number of analysis frames
        all_metadata.append(row)
    return all_metadata
def extract_music_frames(directory):
    '''Extract per-frame audio features for every mp3 file in *directory*.

    Args:
        directory (string): directory that contains the mp3 files.

    Returns:
        metadata ([list]): header row followed by one row per frame with
        (id, mean_thirteen_first_mfcc, zcr, max_chroma).
    '''
    all_metadata = [['id', 'mean_thirteen_first_mfcc', 'zcr', 'max_chroma']]
    for idx, file in enumerate(librosa.util.find_files(directory, ext='mp3')):
        print('Extracting ', file, '...')
        title, artist, _ = load_editorial_metadata(file)
        wf, sr = librosa.load(file)
        # per-frame mean over the first 13 mfcc coefficients
        mfcc = np.mean(librosa.feature.mfcc(y=wf, sr=sr)[:13], axis=0)
        zcr = np.mean(librosa.feature.zero_crossing_rate(y=wf), axis=0)
        # index of the strongest chroma bin per frame
        chroma_stft_max = np.argmax(librosa.feature.chroma_stft(y=wf, sr=sr), axis=0)
        for frame in range(len(mfcc)):
            all_metadata.append([idx, mfcc[frame], zcr[frame], chroma_stft_max[frame]])
    return all_metadata
def save_to_csv(data, csv_file):
    '''Write a list of rows to a csv file.

    Args:
        data ([object]): list of rows to be saved.
        csv_file (string): destination csv path.
    '''
    print('Saving metadata to ', csv_file, '...')
    with open(csv_file, 'w', newline='') as out:
        csv.writer(out).writerows(data)
def exit_with_msg(msg):
    '''Print an exit message, then stop the interpreter.

    Args:
        msg (string): message shown before exiting.
    '''
    print(msg)
    raise SystemExit()
def check_arguments(argv):
    '''Validate the command line arguments.

    Args:
        argv ([string]): argument list (program name plus three arguments).

    Returns:
        (extraction_type, music_folder, csv_file) tuple of strings, or
        exits via exit_with_msg() when the count is wrong.
    '''
    if (len(argv) != 4):
        exit_with_msg('Need 4 arguments to continue')
    else:
        # BUGFIX: read from the *argv* parameter instead of sys.argv, so the
        # function also works when given an argument list other than sys.argv
        # (the length check above already validated argv, not sys.argv).
        extraction_type = argv[1]
        music_folder = argv[2]
        csv_file = argv[3]
        return extraction_type, music_folder, csv_file
# Main program: dispatch on the requested extraction type.
if __name__ == '__main__':
    # Validate CLI arguments: (program, extraction_type, music_folder, csv_file).
    extraction_type, music_folder, csv_file = check_arguments(sys.argv)
    if (extraction_type == 'extract_music'):
        # One feature row per song.
        metadata = extract_music_content(music_folder)
        save_to_csv(metadata, csv_file)
    elif (extraction_type == 'extract_music_frame'):
        # One feature row per analysis frame.
        metadata = extract_music_frames(music_folder)
        save_to_csv(metadata, csv_file)
    else:
        exit_with_msg('Extraction type invalid, please use only \'extract_music\' or \'extract_music_frame\'')
| 30.034091 | 120 | 0.64756 | import csv
import eyed3
import librosa
import numpy as np
import sys
def load_editorial_metadata(audiofile):
    '''Read the editorial (ID3) metadata of an mp3 file.

    Args:
        audiofile (string): path of the mp3 file.

    Returns:
        (title, artist, album) tuple of tag strings read via eyed3.
    '''
    audio = eyed3.load(audiofile)
    return audio.tag.title, audio.tag.artist, audio.tag.album
def get_reformatted_music_file_directory(file):
    '''Reformat a backslash-separated path to "parent/filename" form.

    Args:
        file (string): audio file path using backslash separators.

    Returns:
        directory (string): the last two path components joined with '/'.
    '''
    splitted_dir = file.split('\\')
    directory = '/'.join(splitted_dir[-2:])
    return directory
def extract_music_content(directory):
    '''Extract one row of audio features per mp3 file in *directory*.

    Args:
        directory (string): directory that contains the mp3 files.

    Returns:
        metadata ([list]): header row followed by one row per file with
        (id, file, title, artist, album, mfcc, zcr, tempo, pitch, chroma,
        num_frames).
    '''
    all_metadata = [['id', 'file', 'title', 'artist', 'album', 'mfcc', 'zcr', 'tempo', 'pitch', 'chroma', 'num_frames']]
    files = librosa.util.find_files(directory, ext='mp3')
    for idx, file in enumerate(files):
        print('Extracting ', file, '...')
        music_metadata = []
        music_metadata.append(idx)
        title, artist, album = load_editorial_metadata(file)
        music_metadata.append(get_reformatted_music_file_directory(file))
        music_metadata.append(title)
        music_metadata.append(artist)
        music_metadata.append(album)
        wf, sr = librosa.load(file)
        mfcc = librosa.feature.mfcc(y=wf, sr=sr)
        music_metadata.append(np.mean(mfcc))
        zcr = librosa.feature.zero_crossing_rate(y=wf)
        music_metadata.append(np.mean(zcr))
        tempo = librosa.beat.tempo(y=wf, sr=sr)
        music_metadata.append(tempo[0])
        # Keep only the pitches whose magnitude exceeds the median, then
        # estimate the tuning offset from those dominant pitches.
        pitches, magnitudes = librosa.piptrack(y=wf, sr=sr)
        pitches = pitches[magnitudes > np.median(magnitudes)]
        pitch = librosa.pitch_tuning(pitches)
        music_metadata.append(pitch)
        chroma_stft = librosa.feature.chroma_stft(y=wf, sr=sr)
        music_metadata.append(np.mean(chroma_stft))
        music_metadata.append(len(mfcc[0]))     # number of analysis frames
        all_metadata.append(music_metadata)
    return all_metadata
def extract_music_frames(directory):
    '''Extract per-frame audio features for every mp3 file in *directory*.

    Args:
        directory (string): directory that contains mp3 files.

    Returns:
        metadata ([list]): header row followed by one row per frame with
        (id, mean_thirteen_first_mfcc, zcr, max_chroma).
    '''
    all_metadata = [['id', 'mean_thirteen_first_mfcc', 'zcr', 'max_chroma']]
    files = librosa.util.find_files(directory, ext='mp3')
    for idx, file in enumerate(files):
        print('Extracting ', file, '...')
        title, artist, _ = load_editorial_metadata(file)
        wf, sr = librosa.load(file)
        mfcc = librosa.feature.mfcc(y=wf, sr=sr)
        mfcc = np.mean(mfcc[:13], axis=0)   # per-frame mean of the first 13 mfcc values
        zcr = librosa.feature.zero_crossing_rate(y=wf)
        zcr = np.mean(zcr, axis=0)
        chroma_stft = librosa.feature.chroma_stft(y=wf, sr=sr)
        # index of the strongest chroma bin per frame
        chroma_stft_max = np.argmax(chroma_stft, axis=0)
        for i in range(len(mfcc)):
            music_frame_metadata = []
            music_frame_metadata.append(idx)
            music_frame_metadata.append(mfcc[i])
            music_frame_metadata.append(zcr[i])
            music_frame_metadata.append(chroma_stft_max[i])
            all_metadata.append(music_frame_metadata)
    return all_metadata
def save_to_csv(data, csv_file):
    '''Write a list of rows to a csv file.

    Args:
        data ([object]): list of rows to be saved.
        csv_file (string): destination csv path.
    '''
    print('Saving metadata to ', csv_file, '...')
    with open(csv_file, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(data)
def exit_with_msg(msg):
    '''Print an exit message, then terminate the program.

    Args:
        msg (string): message shown before exiting.
    '''
    print(msg)
    sys.exit()
def check_arguments(argv):
    '''Validate the command line arguments.

    Args:
        argv ([string]): argument list (program name plus three arguments).

    Returns:
        (extraction_type, music_folder, csv_file) tuple of strings, or
        exits via exit_with_msg() when the count is wrong.
    '''
    if (len(argv) != 4):
        exit_with_msg('Need 4 arguments to continue')
    else:
        # BUGFIX: read from the *argv* parameter instead of sys.argv, so the
        # function also works when given an argument list other than sys.argv
        # (the length check above already validated argv, not sys.argv).
        extraction_type = argv[1]
        music_folder = argv[2]
        csv_file = argv[3]
        return extraction_type, music_folder, csv_file
# Main program: dispatch on the requested extraction type.
if __name__ == '__main__':
    # Validate CLI arguments: (program, extraction_type, music_folder, csv_file).
    extraction_type, music_folder, csv_file = check_arguments(sys.argv)
    if (extraction_type == 'extract_music'):
        # One feature row per song.
        metadata = extract_music_content(music_folder)
        save_to_csv(metadata, csv_file)
    elif (extraction_type == 'extract_music_frame'):
        # One feature row per analysis frame.
        metadata = extract_music_frames(music_folder)
        save_to_csv(metadata, csv_file)
    else:
        exit_with_msg('Extraction type invalid, please use only \'extract_music\' or \'extract_music_frame\'')
| true | true |
f73149a9f7e2ac7a2e62263f32e4418400e4b260 | 2,624 | py | Python | src/old_code/utils_old.py | basarane/model-based-rl | af7ba84c272054d1de0b8cf9cc91b571abe91c3d | [
"MIT"
] | null | null | null | src/old_code/utils_old.py | basarane/model-based-rl | af7ba84c272054d1de0b8cf9cc91b571abe91c3d | [
"MIT"
] | null | null | null | src/old_code/utils_old.py | basarane/model-based-rl | af7ba84c272054d1de0b8cf9cc91b571abe91c3d | [
"MIT"
] | null | null | null | import keras.backend as K
import numpy as np
from PIL import Image, ImageDraw
def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
    """Evaluate and return per-layer activations of a Keras model.

    Builds one K.function per layer output (optionally restricted to
    *layer_name*), runs them on *model_inputs* with learning phase 0
    (test mode), prints each activation (or just its shape when
    *print_shape_only* is True) and returns the list of arrays.

    NOTE(review): relies on the legacy ``layer.outbound_nodes`` /
    ``layer.inbound_nodes`` attributes -- presumably a Keras 1.x/early-2.x
    API; confirm against the installed Keras version.
    """
    print('----- activations -----')
    activations = []
    inp = model.input
    model_multi_inputs_cond = True
    if not isinstance(inp, list):
        # only one input! let's wrap it in a list.
        inp = [inp]
        model_multi_inputs_cond = False
    #from pprint import pprint
    #pprint(vars(model.layers[3]))
    # Debug dump: every layer with its node counts and output tensors.
    for layer in model.layers:
        print(layer.name, len(layer.outbound_nodes), len(layer.inbound_nodes))
        for I in range(len(layer.inbound_nodes)):
            o1 = layer.get_output_at(I)
            print(o1.name, o1.shape)
    # One output tensor per inbound node of every (matching) layer, flattened
    # into a single list.
    outputs = [[layer.get_output_at(I) for I in range(len(layer.inbound_nodes))] for layer in model.layers if (layer.name == layer_name or layer_name is None)]
    outputs = [item for sublist in outputs for item in sublist]
    #outputs.extend([])
    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions
    if model_multi_inputs_cond:
        list_inputs = []
        list_inputs.extend(model_inputs)
        list_inputs.append(0.)
    else:
        list_inputs = [model_inputs, 0.]
    print("model_multi_inputs_cond", model_multi_inputs_cond, len(list_inputs))
    # Learning phase. 0 = Test mode (no dropout or batch normalization)
    # layer_outputs = [func([model_inputs, 0.])[0] for func in funcs]
    layer_outputs = [func(list_inputs)[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations
def toRGBImage(x):
    """Convert an arbitrary-mode image array to an RGB uint8 numpy array via PIL."""
    rgb = Image.fromarray(x).convert('RGB')
    return np.array(rgb, dtype='uint8')
def prediction_to_image(prediction, meanImage):
    """De-normalise a network prediction back to a displayable uint8 image.

    Scales [0,1] values to [0,255], re-adds the mean image, clamps to the
    valid range, casts to uint8 and drops singleton dimensions.
    """
    pixels = np.array(prediction) * 255.0 + meanImage
    pixels = np.clip(pixels, 0, 255)
    return np.squeeze(np.array(pixels, dtype="uint8"))
def draw_reward(predImage, reward):
    """Overlay a horizontal reward bar onto the image.

    Draws a white fill proportional to *reward* inside a red 100-px outline
    near the bottom of the frame, and returns the image as a numpy array.
    """
    canvas = Image.fromarray(predImage)
    painter = ImageDraw.Draw(canvas)
    bar_width = 100     # full bar width in pixels
    left = 57           # left edge of the bar
    painter.rectangle([left, 196, left + int(bar_width * reward), 208], "#fff", None)
    painter.rectangle([left, 196, left + bar_width, 208], None, "#f00")
    return np.array(canvas)
def get_obs_input(lastFramesOrig, meanImage):
    """Build the network observation from the last 4 RGB frames.

    Normalises to [0,1], moves channels first, stacks the 4 frames into 12
    planes, subtracts the (tiled) mean image and adds a batch dimension,
    yielding shape (1, 12, 210, 160).
    """
    frames = np.squeeze(np.array(lastFramesOrig, dtype='f') / 255.0)
    frames = np.transpose(frames, (0, 3, 1, 2)).reshape(12, 210, 160)
    mean_planes = np.tile(np.transpose(meanImage / 255.0, (2, 0, 1)), (4, 1, 1))
    return (frames - mean_planes).reshape(1, 12, 210, 160)
| 32 | 156 | 0.726372 | import keras.backend as K
import numpy as np
from PIL import Image, ImageDraw
def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
    """Evaluate and return per-layer activations of a Keras model.

    Builds one K.function per layer output (optionally restricted to
    *layer_name*), runs them on *model_inputs* with learning phase 0
    (test mode), prints each activation (or just its shape when
    *print_shape_only* is True) and returns the list of arrays.

    NOTE(review): relies on the legacy ``layer.outbound_nodes`` /
    ``layer.inbound_nodes`` attributes -- presumably a Keras 1.x/early-2.x
    API; confirm against the installed Keras version.
    """
    print('----- activations -----')
    activations = []
    inp = model.input
    model_multi_inputs_cond = True
    if not isinstance(inp, list):
        # only one input: wrap it in a list
        inp = [inp]
        model_multi_inputs_cond = False
    #from pprint import pprint
    #pprint(vars(model.layers[3]))
    # Debug dump: every layer with its node counts and output tensors.
    for layer in model.layers:
        print(layer.name, len(layer.outbound_nodes), len(layer.inbound_nodes))
        for I in range(len(layer.inbound_nodes)):
            o1 = layer.get_output_at(I)
            print(o1.name, o1.shape)
    # One output tensor per inbound node of every (matching) layer, flattened
    # into a single list.
    outputs = [[layer.get_output_at(I) for I in range(len(layer.inbound_nodes))] for layer in model.layers if (layer.name == layer_name or layer_name is None)]
    outputs = [item for sublist in outputs for item in sublist]
    #outputs.extend([])
    funcs = [K.function(inp + [K.learning_phase()], [out]) for out in outputs]  # evaluation functions
    if model_multi_inputs_cond:
        list_inputs = []
        list_inputs.extend(model_inputs)
        list_inputs.append(0.)
    else:
        list_inputs = [model_inputs, 0.]
    print("model_multi_inputs_cond", model_multi_inputs_cond, len(list_inputs))
    # Learning phase. 0 = Test mode (no dropout or batch normalization)
    # layer_outputs = [func([model_inputs, 0.])[0] for func in funcs]
    layer_outputs = [func(list_inputs)[0] for func in funcs]
    for layer_activations in layer_outputs:
        activations.append(layer_activations)
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations
def toRGBImage(x):
    """Convert an arbitrary-mode image array to an RGB uint8 numpy array via PIL."""
    im = Image.fromarray(x)
    im = im.convert('RGB')
    return np.array(im, dtype='uint8')
def prediction_to_image(prediction, meanImage):
    """De-normalise a network prediction back to a displayable uint8 image.

    Scales [0,1] values to [0,255], re-adds the mean image, clamps to the
    valid range, casts to uint8 and drops singleton dimensions.
    """
    predOutput = np.array(prediction)*255.0
    predOutput = predOutput + meanImage
    predOutput[predOutput<0] = 0
    predOutput[predOutput>255] = 255
    predOutput = np.array(predOutput, dtype="uint8")
    predImage = np.squeeze(predOutput)
    return predImage
def draw_reward(predImage, reward):
    """Overlay a horizontal reward bar onto the image.

    Draws a white fill proportional to *reward* (assumed in [0, 1] --
    TODO confirm against the caller) inside a red 100-px outline near
    the bottom of the frame.
    """
    im = Image.fromarray(predImage)
    draw = ImageDraw.Draw(im)
    w = 100     # full bar width in pixels
    x = 57      # left edge of the bar
    draw.rectangle([x,196,x+int(w*reward),208], "#fff", None)
    draw.rectangle([x,196,x+w,208], None, "#f00")
    predImage = np.array(im)
    return predImage
def get_obs_input(lastFramesOrig, meanImage):
    """Build the network observation from the last 4 RGB frames.

    Assumes lastFramesOrig holds 4 frames of shape (210, 160, 3) -- TODO
    confirm against the caller.  Normalises to [0,1], moves channels first,
    stacks the frames into 12 planes, subtracts the (tiled) mean image and
    adds a batch dimension, yielding shape (1, 12, 210, 160).
    """
    netin = np.array(lastFramesOrig, dtype='f')/255.0
    netin = np.squeeze(netin)
    netin = np.transpose(netin, (0,3,1,2))
    netin = np.reshape(netin, (12, 210,160))
    netin = netin - np.tile(np.transpose(meanImage/255.0, (2,0,1)), (4,1,1))
    netin = np.reshape(netin, (1, 12, 210,160))
    return netin
| true | true |
f73149c06418dfdcc2ccc576f2c8e8c48e6bdbd1 | 1,157 | py | Python | saleor/graphql/page/schema.py | acabezasg/urpi-master | 7c9cd0fbe6d89dad70652482712ca38b21ba6f84 | [
"BSD-3-Clause"
] | 1 | 2019-04-15T09:37:26.000Z | 2019-04-15T09:37:26.000Z | saleor/graphql/page/schema.py | acabezasg/urpi-master | 7c9cd0fbe6d89dad70652482712ca38b21ba6f84 | [
"BSD-3-Clause"
] | 5 | 2021-03-09T16:22:37.000Z | 2022-02-10T19:10:03.000Z | saleor/graphql/page/schema.py | acabezasg/urpi-master | 7c9cd0fbe6d89dad70652482712ca38b21ba6f84 | [
"BSD-3-Clause"
] | 1 | 2020-12-26T10:25:37.000Z | 2020-12-26T10:25:37.000Z | import graphene
from ..core.fields import PrefetchingConnectionField
from ..descriptions import DESCRIPTIONS
from ..translations.mutations import PageTranslate
from .bulk_mutations import PageBulkDelete
from .mutations import PageCreate, PageDelete, PageUpdate
from .resolvers import resolve_page, resolve_pages
from .types import Page
# Query root fields for the page app.  No class docstring on purpose:
# graphene exposes docstrings as GraphQL type descriptions.
class PageQueries(graphene.ObjectType):
    # Single-page lookup, addressable either by global ID or by URL slug.
    page = graphene.Field(
        Page, id=graphene.Argument(graphene.ID), slug=graphene.String(),
        description='Lookup a page by ID or by slug.')
    # Paginated (relay connection) listing, optionally filtered by a search query.
    pages = PrefetchingConnectionField(
        Page, query=graphene.String(
            description=DESCRIPTIONS['page']),
        description='List of the shop\'s pages.')

    def resolve_page(self, info, id=None, slug=None):
        # Delegates to the resolver module with both identifiers.
        return resolve_page(info, id, slug)

    def resolve_pages(self, info, query=None, **kwargs):
        # Pagination kwargs are handled by the connection field itself.
        return resolve_pages(info, query=query)
# Mutation root fields for the page app: CRUD, bulk delete and translation.
# No class docstring on purpose: graphene exposes docstrings as GraphQL
# type descriptions.
class PageMutations(graphene.ObjectType):
    page_create = PageCreate.Field()
    page_delete = PageDelete.Field()
    page_bulk_delete = PageBulkDelete.Field()
    page_update = PageUpdate.Field()
    page_translate = PageTranslate.Field()
| 34.029412 | 72 | 0.736387 | import graphene
from ..core.fields import PrefetchingConnectionField
from ..descriptions import DESCRIPTIONS
from ..translations.mutations import PageTranslate
from .bulk_mutations import PageBulkDelete
from .mutations import PageCreate, PageDelete, PageUpdate
from .resolvers import resolve_page, resolve_pages
from .types import Page
# Query root fields for the page app.  No class docstring on purpose:
# graphene exposes docstrings as GraphQL type descriptions.
class PageQueries(graphene.ObjectType):
    # Single-page lookup, addressable either by global ID or by URL slug.
    page = graphene.Field(
        Page, id=graphene.Argument(graphene.ID), slug=graphene.String(),
        description='Lookup a page by ID or by slug.')
    # Paginated (relay connection) listing, optionally filtered by a search query.
    pages = PrefetchingConnectionField(
        Page, query=graphene.String(
            description=DESCRIPTIONS['page']),
        description='List of the shop\'s pages.')

    def resolve_page(self, info, id=None, slug=None):
        # Delegates to the resolver module with both identifiers.
        return resolve_page(info, id, slug)

    def resolve_pages(self, info, query=None, **kwargs):
        # Pagination kwargs are handled by the connection field itself.
        return resolve_pages(info, query=query)
# Mutation root fields for the page app: CRUD, bulk delete and translation.
# No class docstring on purpose: graphene exposes docstrings as GraphQL
# type descriptions.
class PageMutations(graphene.ObjectType):
    page_create = PageCreate.Field()
    page_delete = PageDelete.Field()
    page_bulk_delete = PageBulkDelete.Field()
    page_update = PageUpdate.Field()
    page_translate = PageTranslate.Field()
| true | true |
f73149da213043c623eaf8c02ac1225d022f99d9 | 69,506 | py | Python | server/datasets/tcga/constants.py | imwangtongxue-com/digital_slide_archive | 3c08432bf3ca192d8948cbe22a263c2259c542d5 | [
"Apache-2.0"
] | null | null | null | server/datasets/tcga/constants.py | imwangtongxue-com/digital_slide_archive | 3c08432bf3ca192d8948cbe22a263c2259c542d5 | [
"Apache-2.0"
] | null | null | null | server/datasets/tcga/constants.py | imwangtongxue-com/digital_slide_archive | 3c08432bf3ca192d8948cbe22a263c2259c542d5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# flake8: noqa: E501
class TcgaCodes(object):
    """Constant lookup tables for codes used in TCGA data.

    The tables mirror the published TCGA code tables (study abbreviations,
    repository/data-type directory names, slide-location codes, tissue
    source sites) and are used to interpret TCGA file paths and barcodes.
    """

    # Disease study abbreviation -> full study name.
    DISEASE_STUDIES = {
        # 'Study Abbreviation': 'Study Name',
        'LAML': 'Acute Myeloid Leukemia',
        'ACC': 'Adrenocortical carcinoma',
        'BLCA': 'Bladder Urothelial Carcinoma',
        'LGG': 'Brain Lower Grade Glioma',
        'BRCA': 'Breast invasive carcinoma',
        'CESC': 'Cervical squamous cell carcinoma and endocervical adenocarcinoma',
        'CHOL': 'Cholangiocarcinoma',
        'LCML': 'Chronic Myelogenous Leukemia',
        'COAD': 'Colon adenocarcinoma',
        'CNTL': 'Controls',
        'ESCA': 'Esophageal carcinoma ',
        'FPPP': 'FFPE Pilot Phase II',
        'GBM': 'Glioblastoma multiforme',
        'HNSC': 'Head and Neck squamous cell carcinoma',
        'KICH': 'Kidney Chromophobe',
        'KIRC': 'Kidney renal clear cell carcinoma',
        'KIRP': 'Kidney renal papillary cell carcinoma',
        'LIHC': 'Liver hepatocellular carcinoma',
        'LUAD': 'Lung adenocarcinoma',
        'LUSC': 'Lung squamous cell carcinoma',
        'DLBC': 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma',
        'MESO': 'Mesothelioma',
        'MISC': 'Miscellaneous',
        'OV': 'Ovarian serous cystadenocarcinoma',
        'PAAD': 'Pancreatic adenocarcinoma',
        'PCPG': 'Pheochromocytoma and Paraganglioma',
        'PRAD': 'Prostate adenocarcinoma',
        'READ': 'Rectum adenocarcinoma',
        'SARC': 'Sarcoma',
        'SKCM': 'Skin Cutaneous Melanoma',
        'STAD': 'Stomach adenocarcinoma',
        'TGCT': 'Testicular Germ Cell Tumors',
        'THYM': 'Thymoma',
        'THCA': 'Thyroid carcinoma',
        'UCS': 'Uterine Carcinosarcoma',
        'UCEC': 'Uterine Corpus Endometrial Carcinoma',
        'UVM': 'Uveal Melanoma',
    }
REPOSITORY_TYPES = {
'bcr', # 'Biospecimen Core Resource'
'cgcc',
'gsc',
}
DATA_PROVIDERS = {
'biotab', # Clinical metadata, skip
'intgen.org',
'nationwidechildrens.org',
'genome.wustl.edu',
'supplemental' # unknown, appears under 'tumor/ov/bcr/', skip
}
DATA_TYPES = {
'bio', # XML format clinical metadata, skip
'biotab', # CSV format clinical metadata, skip
'pathology_reports', # PDF format pathology reports, skip
'diagnostic_images', # SVS format images, use
'tissue_images', # SVS format images, use
'minbio' # unknown, appears under 'tumor/gbm/bcr/intgen.org/', skip
}
SLIDE_LOCATION = {
'TS': 'Top Slide',
'MS': 'Middle Slide',
'BS': 'Bottom Slide',
'DX': 'Top Slide',
}
TISSUE_SOURCE_SITE = {
# 'TSS Code': ('Source Site', 'Study Name', 'BCR'),
'01': ('International Genomics Consortium', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'02': ('MD Anderson Cancer Center', 'Glioblastoma multiforme', 'IGC'),
'04': ('Gynecologic Oncology Group', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'05': ('Indivumed', 'Lung adenocarcinoma', 'IGC'),
'06': ('Henry Ford Hospital', 'Glioblastoma multiforme', 'IGC'),
'07': ('TGen', 'Cell Line Control', 'IGC'),
'08': ('UCSF', 'Glioblastoma multiforme', 'IGC'),
'09': ('UCSF', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'10': ('MD Anderson Cancer Center', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'11': ('MD Anderson Cancer Center', 'Lung squamous cell carcinoma', 'IGC'),
'12': ('Duke', 'Glioblastoma multiforme', 'IGC'),
'13': ('Memorial Sloan Kettering', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'14': ('Emory University', 'Glioblastoma multiforme', 'IGC'),
'15': ('Mayo Clinic - Rochester', 'Glioblastoma multiforme', 'IGC'),
'16': ('Toronto Western Hospital', 'Glioblastoma multiforme', 'IGC'),
'17': ('Washington University', 'Lung adenocarcinoma', 'IGC'),
'18': ('Princess Margaret Hospital (Canada)', 'Lung squamous cell carcinoma', 'IGC'),
'19': ('Case Western', 'Glioblastoma multiforme', 'IGC'),
'1Z': ('Johns Hopkins', 'Thymoma', 'NCH'),
'20': ('Fox Chase Cancer Center', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'21': ('Fox Chase Cancer Center', 'Lung squamous cell carcinoma', 'IGC'),
'22': ('Mayo Clinic - Rochester', 'Lung squamous cell carcinoma', 'IGC'),
'23': ('Cedars Sinai', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'24': ('Washington University', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'25': ('Mayo Clinic - Rochester', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'26': ('University of Florida', 'Glioblastoma multiforme', 'IGC'),
'27': ('Milan - Italy, Fondazione IRCCS Instituto Neuroligico C. Besta', 'Glioblastoma multiforme', 'IGC'),
'28': ('Cedars Sinai', 'Glioblastoma multiforme', 'IGC'),
'29': ('Duke', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'2A': ('Memorial Sloan Kettering Cancer Center', 'Prostate adenocarcinoma', 'NCH'),
'2E': ('University of Kansas Medical Center', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'2F': ('Erasmus MC', 'Bladder Urothelial Carcinoma', 'NCH'),
'2G': ('Erasmus MC', 'Testicular Germ Cell Tumors', 'NCH'),
'2H': ('Erasmus MC', 'Esophageal carcinoma ', 'NCH'),
'2J': ('Mayo Clinic', 'Pancreatic adenocarcinoma', 'NCH'),
'2K': ('Greenville Health System', 'Kidney renal papillary cell carcinoma', 'NCH'),
'2L': ('Technical University of Munich', 'Pancreatic adenocarcinoma', 'NCH'),
'2M': ('Technical University of Munich', 'Esophageal carcinoma ', 'NCH'),
'2N': ('Technical University of Munich', 'Stomach adenocarcinoma', 'NCH'),
'2P': ('University of California San Diego', 'Pancreatic adenocarcinoma', 'NCH'),
'2V': ('University of California San Diego', 'Liver hepatocellular carcinoma', 'NCH'),
'2W': ('University of New Mexico', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'2X': ('ABS IUPUI', 'Testicular Germ Cell Tumors', 'NCH'),
'2Y': ('Moffitt Cancer Center', 'Liver hepatocellular carcinoma', 'NCH'),
'2Z': ('Moffitt Cancer Center', 'Kidney renal papillary cell carcinoma', 'NCH'),
'30': ('Harvard', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'31': ('Imperial College', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'32': ('St. Joseph\'s Hospital (AZ)', 'Glioblastoma multiforme', 'IGC'),
'33': ('Johns Hopkins', 'Lung squamous cell carcinoma', 'IGC'),
'34': ('University of Pittsburgh', 'Lung squamous cell carcinoma', 'IGC'),
'35': ('Cureline', 'Lung adenocarcinoma', 'IGC'),
'36': ('BC Cancer Agency', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'37': ('Cureline', 'Lung squamous cell carcinoma', 'IGC'),
'38': ('UNC', 'Lung adenocarcinoma', 'IGC'),
'39': ('MSKCC', 'Lung squamous cell carcinoma', 'IGC'),
'3A': ('Moffitt Cancer Center', 'Pancreatic adenocarcinoma', 'NCH'),
'3B': ('Moffitt Cancer Center', 'Sarcoma', 'NCH'),
'3C': ('Columbia University', 'Breast invasive carcinoma', 'NCH'),
'3E': ('Columbia University', 'Pancreatic adenocarcinoma', 'NCH'),
'3G': ('MD Anderson Cancer Center', 'Thymoma', 'NCH'),
'3H': ('MD Anderson Cancer Center', 'Mesothelioma', 'NCH'),
'3J': ('Carle Cancer Center', 'Breast invasive carcinoma', 'NCH'),
'3K': ('Boston Medical Center', 'Liver hepatocellular carcinoma', 'NCH'),
'3L': ('Albert Einstein Medical Center', 'Colon adenocarcinoma', 'NCH'),
'3M': ('University of Kansas Medical Center', 'Stomach adenocarcinoma', 'NCH'),
'3N': ('Greenville Health System', 'Skin Cutaneous Melanoma', 'NCH'),
'3P': ('Greenville Health System', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'3Q': ('Greenville Health Systems', 'Thymoma', 'NCH'),
'3R': ('University of New Mexico', 'Sarcoma', 'NCH'),
'3S': ('University of New Mexico', 'Thymoma', 'NCH'),
'3T': ('Emory University', 'Thymoma', 'NCH'),
'3U': ('University of Chicago', 'Mesothelioma', 'NCH'),
'3W': ('University of California San Diego', 'Sarcoma', 'NCH'),
'3X': ('Alberta Health Services', 'Cholangiocarcinoma', 'NCH'),
'3Z': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Kidney renal clear cell carcinoma', 'NCH'),
'41': ('Christiana Healthcare', 'Glioblastoma multiforme', 'IGC'),
'42': ('Christiana Healthcare', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'43': ('Christiana Healthcare', 'Lung squamous cell carcinoma', 'IGC'),
'44': ('Christiana Healthcare', 'Lung adenocarcinoma', 'IGC'),
'46': ('St. Joseph\'s Medical Center (MD)', 'Lung squamous cell carcinoma', 'IGC'),
'49': ('Johns Hopkins', 'Lung adenocarcinoma', 'IGC'),
'4A': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Kidney renal papillary cell carcinoma', 'NCH'),
'4B': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Lung adenocarcinoma', 'NCH'),
'4C': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Thyroid carcinoma', 'NCH'),
'4D': ('Molecular Response', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'4E': ('Molecular Response', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'4G': ('Sapienza University of Rome', 'Cholangiocarcinoma', 'NCH'),
'4H': ('Proteogenex, Inc.', 'Breast invasive carcinoma', 'NCH'),
'4J': ('Proteogenex, Inc.', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'4K': ('Proteogenex, Inc.', 'Testicular Germ Cell Tumors', 'NCH'),
'4L': ('Proteogenex, Inc.', 'Prostate adenocarcinoma', 'NCH'),
'4N': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Colon adenocarcinoma', 'NCH'),
'4P': ('Duke University', 'Head and Neck squamous cell carcinoma', 'NCH'),
'4Q': ('Duke University', 'Sarcoma', 'NCH'),
'4R': ('Duke University', 'Liver hepatocellular carcinoma', 'NCH'),
'4S': ('Duke University', 'Prostate adenocarcinoma', 'NCH'),
'4T': ('Duke University', 'Colon adenocarcinoma', 'NCH'),
'4V': ('Hospital Louis Pradel', 'Thymoma', 'NCH'),
'4W': ('University of Miami', 'Glioblastoma multiforme', 'NCH'),
'4X': ('Yale University', 'Thymoma', 'NCH'),
'4Y': ('Medical College of Wisconsin', 'Sarcoma', 'NCH'),
'4Z': ('Barretos Cancer Hospital', 'Bladder Urothelial Carcinoma', 'NCH'),
'50': ('University of Pittsburgh', 'Lung adenocarcinoma', 'IGC'),
'51': ('UNC', 'Lung squamous cell carcinoma', 'IGC'),
'52': ('University of Miami', 'Lung squamous cell carcinoma', 'IGC'),
'53': ('University of Miami', 'Lung adenocarcinoma', 'IGC'),
'55': ('International Genomics Consortium', 'Lung adenocarcinoma', 'IGC'),
'56': ('International Genomics Consortium', 'Lung squamous cell carcinoma', 'IGC'),
'57': ('International Genomics Consortium', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'58': ('Thoraxklinik at University Hospital Heidelberg', 'Lung squamous cell carcinoma', 'IGC'),
'59': ('Roswell Park', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'5A': ('Wake Forest University', 'Cholangiocarcinoma', 'NCH'),
'5B': ('Medical College of Wisconsin', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'5C': ('Cureline', 'Liver hepatocellular carcinoma', 'NCH'),
'5D': ('University of Miami', 'Sarcoma', 'NCH'),
'5F': ('Duke University', 'Thyroid carcinoma', 'NCH'),
'5G': ('Cleveland Clinic Foundation', 'Thymoma', 'NCH'),
'5H': ('Retina Consultants Houston', 'Uveal Melanoma', 'NCH'),
'5J': ('Cureline', 'Acute Myeloid Leukemia', 'NCH'),
'5K': ('St. Joseph\'s Hospital AZ', 'Thymoma', 'NCH'),
'5L': ('University of Sao Paulo', 'Breast invasive carcinoma', 'NCH'),
'5M': ('University of Sao Paulo', 'Colon adenocarcinoma', 'NCH'),
'5N': ('University Hospital Erlangen', 'Bladder Urothelial Carcinoma', 'NCH'),
'5P': ('University Hospital Erlangen', 'Kidney renal papillary cell carcinoma', 'NCH'),
'5Q': ('Proteogenex, Inc', 'Pancreatic adenocarcinoma', 'NCH'),
'5R': ('Proteogenex, Inc', 'Liver hepatocellular carcinoma', 'NCH'),
'5S': ('Holy Cross', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'5T': ('Holy Cross', 'Breast invasive carcinoma', 'NCH'),
'5U': ('Regina Elena National Cancer Institute', 'Thymoma', 'NCH'),
'5V': ('Roswell Park', 'Thymoma', 'NCH'),
'5W': ('University of Alabama', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'5X': ('University of Alabama', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'60': ('Roswell Park', 'Lung squamous cell carcinoma', 'IGC'),
'61': ('University of Pittsburgh', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'62': ('Thoraxklinik at University Hospital Heidelberg', 'Lung adenocarcinoma', 'IGC'),
'63': ('Ontario Institute for Cancer Research', 'Lung squamous cell carcinoma', 'IGC'),
'64': ('Fox Chase', 'Lung adenocarcinoma', 'IGC'),
'65': ('Roswell Park', 'Glioblastoma multiforme', 'IGC'),
'66': ('Indivumed', 'Lung squamous cell carcinoma', 'IGC'),
'67': ('St Joseph\'s Medical Center (MD)', 'Lung adenocarcinoma', 'IGC'),
'68': ('Washington University - Cleveland Clinic', 'Lung squamous cell carcinoma', 'IGC'),
'69': ('Washington University - Cleveland Clinic', 'Lung adenocarcinoma', 'IGC'),
'6A': ('University of Kansas', 'Lung squamous cell carcinoma', 'NCH'),
'6D': ('University of Oklahoma HSC', 'Kidney renal clear cell carcinoma', 'NCH'),
'6G': ('University of Sao Paulo', 'Rectum adenocarcinoma', 'NCH'),
'70': ('ILSBio', 'Lung squamous cell carcinoma', 'IGC'),
'71': ('ILSBio', 'Lung adenocarcinoma', 'IGC'),
'72': ('NCH', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'73': ('Roswell Park', 'Lung adenocarcinoma', 'IGC'),
'74': ('Swedish Neurosciences', 'Glioblastoma multiforme', 'IGC'),
'75': ('Ontario Institute for Cancer Research (OICR)', 'Lung adenocarcinoma', 'IGC'),
'76': ('Thomas Jefferson University', 'Glioblastoma multiforme', 'IGC'),
'77': ('Prince Charles Hospital', 'Lung squamous cell carcinoma', 'IGC'),
'78': ('Prince Charles Hospital', 'Lung adenocarcinoma', 'IGC'),
'79': ('Ontario Institute for Cancer Research (OICR)/Ottawa', 'Lung squamous cell carcinoma', 'IGC'),
'80': ('Ontario Institute for Cancer Research (OICR)/Ottawa', 'Lung adenocarcinoma', 'IGC'),
'81': ('CHI-Penrose Colorado', 'Glioblastoma multiforme', 'IGC'),
'82': ('CHI-Penrose Colorado', 'Lung squamous cell carcinoma', 'IGC'),
'83': ('CHI-Penrose Colorado', 'Lung adenocarcinoma', 'IGC'),
'85': ('Asterand', 'Lung squamous cell carcinoma', 'IGC'),
'86': ('Asterand', 'Lung adenocarcinoma', 'IGC'),
'87': ('International Genomics Consortium', 'Glioblastoma multiforme', 'IGC'),
'90': ('ABS - IUPUI', 'Lung squamous cell carcinoma', 'IGC'),
'91': ('ABS - IUPUI', 'Lung adenocarcinoma', 'IGC'),
'92': ('Washington University - St. Louis', 'Lung squamous cell carcinoma', 'IGC'),
'93': ('Washington University - St. Louis', 'Lung adenocarcinoma', 'IGC'),
'94': ('Washington University - Emory', 'Lung squamous cell carcinoma', 'IGC'),
'95': ('Washington University - Emory', 'Lung adenocarcinoma', 'IGC'),
'96': ('Washington University - NYU', 'Lung squamous cell carcinoma', 'IGC'),
'97': ('Washington University - NYU', 'Lung adenocarcinoma', 'IGC'),
'98': ('Washington University - Alabama', 'Lung squamous cell carcinoma', 'IGC'),
'99': ('Washington University - Alabama', 'Lung adenocarcinoma', 'IGC'),
'A1': ('UCSF', 'Breast invasive carcinoma', 'NCH'),
'A2': ('Walter Reed', 'Breast invasive carcinoma', 'NCH'),
'A3': ('International Genomics Consortium', 'Kidney renal clear cell carcinoma', 'IGC'),
'A4': ('International Genomics Consortium', 'Kidney renal papillary cell carcinoma', 'IGC'),
'A5': ('Cedars Sinai', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'A6': ('Christiana Healthcare', 'Colon adenocarcinoma', 'IGC'),
'A7': ('Christiana Healthcare', 'Breast invasive carcinoma', 'NCH'),
'A8': ('Indivumed', 'Breast invasive carcinoma', 'NCH'),
'AA': ('Indivumed', 'Colon adenocarcinoma', 'IGC'),
'AB': ('Washington University', 'Acute Myeloid Leukemia', 'NCH'),
'AC': ('International Genomics Consortium', 'Breast invasive carcinoma', 'NCH'),
'AD': ('International Genomics Consortium', 'Colon adenocarcinoma', 'IGC'),
'AF': ('Christiana Healthcare', 'Rectum adenocarcinoma', 'IGC'),
'AG': ('Indivumed', 'Rectum adenocarcinoma', 'IGC'),
'AH': ('International Genomics Consortium', 'Rectum adenocarcinoma', 'IGC'),
'AJ': ('International Genomics Conosrtium', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'AK': ('Fox Chase', 'Kidney renal clear cell carcinoma', 'IGC'),
'AL': ('Fox Chase', 'Kidney renal papillary cell carcinoma', 'IGC'),
'AM': ('Cureline', 'Colon adenocarcinoma', 'IGC'),
'AN': ('Cureline', 'Breast invasive carcinoma', 'NCH'),
'AO': ('MSKCC', 'Breast invasive carcinoma', 'NCH'),
'AP': ('MSKCC', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'AQ': ('UNC ', 'Breast invasive carcinoma', 'NCH'),
'AR': ('Mayo', 'Breast invasive carcinoma', 'NCH'),
'AS': ('St. Joseph\'s Medical Center-(MD)', 'Kidney renal clear cell carcinoma', 'IGC'),
'AT': ('St. Joseph\'s Medical Center-(MD)', 'Kidney renal papillary cell carcinoma', 'IGC'),
'AU': ('St. Joseph\'s Medical Center-(MD)', 'Colon adenocarcinoma', 'IGC'),
'AV': ('NCH', 'Cell Line Control', 'NCH'),
'AW': ('Cureline', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'AX': ('Gynecologic Oncology Group', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'AY': ('UNC', 'Colon adenocarcinoma', 'IGC'),
'AZ': ('University of Pittsburgh', 'Colon adenocarcinoma', 'IGC'),
'B0': ('University of Pittsburgh', 'Kidney renal clear cell carcinoma', 'IGC'),
'B1': ('University of Pittsburgh', 'Kidney renal papillary cell carcinoma', 'IGC'),
'B2': ('Christiana Healthcare', 'Kidney renal clear cell carcinoma', 'IGC'),
'B3': ('Christiana Healthcare', 'Kidney renal papillary cell carcinoma', 'IGC'),
'B4': ('Cureline', 'Kidney renal clear cell carcinoma', 'IGC'),
'B5': ('Duke', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'B6': ('Duke', 'Breast invasive carcinoma', 'NCH'),
'B7': ('Cureline', 'Stomach adenocarcinoma', 'IGC'),
'B8': ('UNC', 'Kidney renal clear cell carcinoma', 'IGC'),
'B9': ('UNC', 'Kidney renal papillary cell carcinoma', 'IGC'),
'BA': ('UNC', 'Head and Neck squamous cell carcinoma', 'IGC'),
'BB': ('Johns Hopkins', 'Head and Neck squamous cell carcinoma', 'IGC'),
'BC': ('UNC', 'Liver hepatocellular carcinoma', 'NCH'),
'BD': ('University of Pittsburgh', 'Liver hepatocellular carcinoma', 'NCH'),
'BF': ('Cureline', 'Skin Cutaneous Melanoma', 'NCH'),
'BG': ('University of Pittsburgh', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'BH': ('University of Pittsburgh', 'Breast invasive carcinoma', 'NCH'),
'BI': ('University of Pittsburgh', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'BJ': ('University of Pittsburgh', 'Thyroid carcinoma', 'IGC'),
'BK': ('Christiana Healthcare', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'BL': ('Christiana Healthcare', 'Bladder Urothelial Carcinoma', 'NCH'),
'BM': ('UNC', 'Rectum adenocarcinoma', 'IGC'),
'BP': ('MSKCC', 'Kidney renal clear cell carcinoma', 'IGC'),
'BQ': ('MSKCC', 'Kidney renal papillary cell carcinoma', 'IGC'),
'BR': ('Asterand', 'Stomach adenocarcinoma', 'IGC'),
'BS': ('University of Hawaii', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'BT': ('University of Pittsburgh', 'Bladder Urothelial Carcinoma', 'NCH'),
'BW': ('St. Joseph\'s Medical Center-(MD)', 'Liver hepatocellular carcinoma', 'NCH'),
'C4': ('Indivumed', 'Bladder Urothelial Carcinoma', 'NCH'),
'C5': ('Medical College of Wisconsin', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'C8': ('ILSBio', 'Breast invasive carcinoma', 'NCH'),
'C9': ('ILSBio', 'Head and Neck squamous cell carcinoma', 'NCH'),
'CA': ('ILSBio', 'Colon adenocarcinoma', 'IGC'),
'CB': ('ILSBio', 'Kidney renal clear cell carcinoma', 'IGC'),
'CC': ('ILSBio', 'Liver hepatocellular carcinoma', 'NCH'),
'CD': ('ILSBio', 'Stomach adenocarcinoma', 'IGC'),
'CE': ('ILSBio', 'Thyroid carcinoma', 'IGC'),
'CF': ('ILSBio', 'Bladder Urothelial Carcinoma', 'NCH'),
'CG': ('Indivumed', 'Stomach adenocarcinoma', 'IGC'),
'CH': ('Indivumed', 'Prostate adenocarcinoma', 'IGC'),
'CI': ('University of Pittsburgh', 'Rectum adenocarcinoma', 'IGC'),
'CJ': ('MD Anderson Cancer Center', 'Kidney renal clear cell carcinoma', 'IGC'),
'CK': ('Harvard', 'Colon adenocarcinoma', 'IGC'),
'CL': ('Harvard', 'Rectum adenocarcinoma', 'IGC'),
'CM': ('MSKCC', 'Colon adenocarcinoma', 'IGC'),
'CN': ('University of Pittsburgh', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CQ': ('University Health Network, Toronto', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CR': ('Vanderbilt University', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CS': ('Thomas Jefferson University', 'Brain Lower Grade Glioma', 'IGC'),
'CU': ('UNC', 'Bladder Urothelial Carcinoma', 'NCH'),
'CV': ('MD Anderson Cancer Center', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CW': ('Mayo Clinic - Rochester', 'Kidney renal clear cell carcinoma', 'IGC'),
'CX': ('Medical College of Georgia', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CZ': ('Harvard', 'Kidney renal clear cell carcinoma', 'IGC'),
'D1': ('Mayo Clinic', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'D3': ('MD Anderson', 'Skin Cutaneous Melanoma', 'NCH'),
'D5': ('Greater Poland Cancer Center', 'Colon adenocarcinoma', 'IGC'),
'D6': ('Greater Poland Cancer Center', 'Head and Neck squamous cell carcinoma', 'IGC'),
'D7': ('Greater Poland Cancer Center', 'Stomach adenocarcinoma', 'IGC'),
'D8': ('Greater Poland Cancer Center', 'Breast invasive carcinoma', 'NCH'),
'D9': ('Greater Poland Cancer Center', 'Skin Cutaneous Melanoma', 'NCH'),
'DA': ('Yale', 'Skin Cutaneous Melanoma', 'NCH'),
'DB': ('Mayo Clinic - Rochester', 'Brain Lower Grade Glioma', 'IGC'),
'DC': ('MSKCC', 'Rectum adenocarcinoma', 'IGC'),
'DD': ('Mayo Clinic - Rochester', 'Liver hepatocellular carcinoma', 'NCH'),
'DE': ('University of North Carolina', 'Thyroid carcinoma', 'NCH'),
'DF': ('Ontario Institute for Cancer Research', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'DG': ('Ontario Institute for Cancer Research', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'DH': ('University of Florida', 'Brain Lower Grade Glioma', 'IGC'),
'DI': ('MD Anderson', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'DJ': ('Memorial Sloan Kettering', 'Thyroid carcinoma', 'NCH'),
'DK': ('Memorial Sloan Kettering', 'Bladder Urothelial Carcinoma', 'NCH'),
'DM': ('University Of Michigan', 'Colon adenocarcinoma', 'NCH'),
'DO': ('Medical College of Georgia', 'Thyroid carcinoma', 'NCH'),
'DQ': ('University Of Michigan', 'Head and Neck squamous cell carcinoma', 'IGC'),
'DR': ('University of Hawaii', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'DS': ('Cedars Sinai', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'DT': ('ILSBio', 'Rectum adenocarcinoma', 'IGC'),
'DU': ('Henry Ford Hospital', 'Brain Lower Grade Glioma', 'IGC'),
'DV': ('NCI Urologic Oncology Branch', 'Kidney renal clear cell carcinoma', 'IGC'),
'DW': ('NCI Urologic Oncology Branch', 'Kidney renal papillary cell carcinoma', 'IGC'),
'DX': ('Memorial Sloan Kettering', 'Sarcoma', 'NCH'),
'DY': ('University Of Michigan', 'Rectum adenocarcinoma', 'NCH'),
'DZ': ('Mayo Clinic - Rochester', 'Kidney renal papillary cell carcinoma', 'IGC'),
'E1': ('Duke', 'Brain Lower Grade Glioma', 'IGC'),
'E2': ('Roswell Park', 'Breast invasive carcinoma', 'NCH'),
'E3': ('Roswell Park', 'Thyroid carcinoma', 'NCH'),
'E5': ('Roswell Park', 'Bladder Urothelial Carcinoma', 'NCH'),
'E6': ('Roswell Park', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'E7': ('Asterand', 'Bladder Urothelial Carcinoma', 'NCH'),
'E8': ('Asterand', 'Thyroid carcinoma', 'NCH'),
'E9': ('Asterand', 'Breast invasive carcinoma', 'NCH'),
'EA': ('Asterand', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'EB': ('Asterand', 'Skin Cutaneous Melanoma', 'NCH'),
'EC': ('Asterand', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'ED': ('Asterand', 'Liver hepatocellular carcinoma', 'NCH'),
'EE': ('University of Sydney', 'Skin Cutaneous Melanoma', 'NCH'),
'EF': ('Cureline', 'Rectum adenocarcinoma', 'IGC'),
'EI': ('Greater Poland Cancer Center', 'Rectum adenocarcinoma', 'IGC'),
'EJ': ('University of Pittsburgh', 'Prostate adenocarcinoma', 'IGC'),
'EK': ('Gynecologic Oncology Group', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'EL': ('MD Anderson', 'Thyroid carcinoma', 'NCH'),
'EM': ('University Health Network', 'Thyroid carcinoma', 'NCH'),
'EO': ('University Health Network', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'EP': ('Christiana Healthcare', 'Liver hepatocellular carcinoma', 'NCH'),
'EQ': ('Christiana Healthcare', 'Stomach adenocarcinoma', 'IGC'),
'ER': ('University of Pittsburgh', 'Skin Cutaneous Melanoma', 'NCH'),
'ES': ('University of Florida', 'Liver hepatocellular carcinoma', 'NCH'),
'ET': ('Johns Hopkins', 'Thyroid carcinoma', 'NCH'),
'EU': ('CHI-Penrose Colorado', 'Kidney renal clear cell carcinoma', 'IGC'),
'EV': ('CHI-Penrose Colorado', 'Kidney renal papillary cell carcinoma', 'IGC'),
'EW': ('University of Miami', 'Breast invasive carcinoma', 'NCH'),
'EX': ('University of North Carolina', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'EY': ('University of North Carolina', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'EZ': ('UNC', 'Brain Lower Grade Glioma', 'IGC'),
'F1': ('UNC', 'Stomach adenocarcinoma', 'IGC'),
'F2': ('UNC', 'Pancreatic adenocarcinoma', 'IGC'),
'F4': ('Asterand', 'Colon adenocarcinoma', 'IGC'),
'F5': ('Asterand', 'Rectum adenocarcinoma', 'IGC'),
'F6': ('Asterand', 'Brain Lower Grade Glioma', 'IGC'),
'F7': ('Asterand', 'Head and Neck squamous cell carcinoma', 'IGC'),
'F9': ('Asterand', 'Kidney renal papillary cell carcinoma', 'IGC'),
'FA': ('Asterand', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'FB': ('Asterand', 'Pancreatic adenocarcinoma', 'IGC'),
'FC': ('Asterand', 'Prostate adenocarcinoma', 'IGC'),
'FD': ('BLN - University Of Chicago', 'Bladder Urothelial Carcinoma', 'NCH'),
'FE': ('Ohio State University', 'Thyroid carcinoma', 'NCH'),
'FF': ('SingHealth', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'FG': ('Case Western', 'Brain Lower Grade Glioma', 'IGC'),
'FH': ('CHI-Penrose Colorado', 'Thyroid carcinoma', 'NCH'),
'FI': ('Washington University', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'FJ': ('BLN - Baylor', 'Bladder Urothelial Carcinoma', 'NCH'),
'FK': ('Johns Hopkins', 'Thyroid carcinoma', 'NCH'),
'FL': ('University of Hawaii - Normal Study', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'FM': ('International Genomics Consortium', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'FN': ('International Genomics Consortium', 'Brain Lower Grade Glioma', 'IGC'),
'FP': ('International Genomics Consortium', 'Stomach adenocarcinoma', 'IGC'),
'FQ': ('Johns Hopkins', 'Pancreatic adenocarcinoma', 'IGC'),
'FR': ('University of North Carolina', 'Skin Cutaneous Melanoma', 'NCH'),
'FS': ('Essen', 'Skin Cutaneous Melanoma', 'NCH'),
'FT': ('BLN - University of Miami', 'Bladder Urothelial Carcinoma', 'NCH'),
'FU': ('International Genomics Consortium', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'FV': ('International Genomics Consortium', 'Liver hepatocellular carcinoma', 'NCH'),
'FW': ('International Genomics Consortium', 'Skin Cutaneous Melanoma', 'NCH'),
'FX': ('International Genomics Consortium', 'Sarcoma', 'NCH'),
'FY': ('International Genomics Consortium', 'Thyroid carcinoma', 'NCH'),
'FZ': ('University of Pittsburgh', 'Pancreatic adenocarcinoma', 'IGC'),
'G2': ('MD Anderson', 'Bladder Urothelial Carcinoma', 'NCH'),
'G3': ('Alberta Health Services', 'Liver hepatocellular carcinoma', 'NCH'),
'G4': ('Roswell Park', 'Colon adenocarcinoma', 'IGC'),
'G5': ('Roswell Park', 'Rectum adenocarcinoma', 'IGC'),
'G6': ('Roswell Park', 'Kidney renal clear cell carcinoma', 'IGC'),
'G7': ('Roswell Park', 'Kidney renal papillary cell carcinoma', 'IGC'),
'G8': ('Roswell Park', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'G9': ('Roswell Park', 'Prostate adenocarcinoma', 'IGC'),
'GC': ('International Genomics Consortium', 'Bladder Urothelial Carcinoma', 'NCH'),
'GD': ('ABS - IUPUI', 'Bladder Urothelial Carcinoma', 'NCH'),
'GE': ('ABS - IUPUI', 'Thyroid carcinoma', 'NCH'),
'GF': ('ABS - IUPUI', 'Skin Cutaneous Melanoma', 'NCH'),
'GG': ('ABS - IUPUI', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'GH': ('ABS - IUPUI', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'GI': ('ABS - IUPUI', 'Breast invasive carcinoma', 'NCH'),
'GJ': ('ABS - IUPUI', 'Liver hepatocellular carcinoma', 'NCH'),
'GK': ('ABS - IUPUI', 'Kidney renal clear cell carcinoma', 'IGC'),
'GL': ('ABS - IUPUI', 'Kidney renal papillary cell carcinoma', 'IGC'),
'GM': ('MD Anderson', 'Breast invasive carcinoma', 'NCH'),
'GN': ('Roswell', 'Skin Cutaneous Melanoma', 'NCH'),
'GP': ('MD Anderson', 'Acute Myeloid Leukemia', 'NCH'),
'GR': ('University of Nebraska Medical Center (UNMC)', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'GS': ('Fundacio Clinic per a la Recerca Biomedica', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'GU': ('BLN - UT Southwestern Medical Center at Dallas', 'Bladder Urothelial Carcinoma', 'NCH'),
'GV': ('BLN - Cleveland Clinic', 'Bladder Urothelial Carcinoma', 'NCH'),
'GZ': ('BC Cancer Agency', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'H1': ('Medical College of Georgia', 'Stomach adenocarcinoma', 'IGC'),
'H2': ('Christiana Healthcare', 'Thyroid carcinoma', 'NCH'),
'H3': ('ABS - IUPUI', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'H4': ('Medical College of Georgia', 'Bladder Urothelial Carcinoma', 'NCH'),
'H5': ('Medical College of Georgia', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'H6': ('Christiana Healthcare', 'Pancreatic adenocarcinoma', 'IGC'),
'H7': ('ABS - IUPUI', 'Head and Neck squamous cell carcinoma', 'IGC'),
'H8': ('ABS - IUPUI', 'Pancreatic adenocarcinoma', 'IGC'),
'H9': ('ABS - IUPUI', 'Prostate adenocarcinoma', 'IGC'),
'HA': ('Alberta Health Services', 'Stomach adenocarcinoma', 'IGC'),
'HB': ('University of North Carolina', 'Sarcoma', 'NCH'),
'HC': ('International Genomics Consortium', 'Prostate adenocarcinoma', 'IGC'),
'HD': ('International Genomics Consortium', 'Head and Neck squamous cell carcinoma', 'IGC'),
'HE': ('Ontario Institute for Cancer Research (OICR)', 'Kidney renal papillary cell carcinoma', 'IGC'),
'HF': ('Ontario Institute for Cancer Research (OICR)', 'Stomach adenocarcinoma', 'IGC'),
'HG': ('Roswell Park', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'HH': ('Fox Chase', 'Stomach adenocarcinoma', 'IGC'),
'HI': ('Fox Chase', 'Prostate adenocarcinoma', 'IGC'),
'HJ': ('Fox Chase', 'Stomach adenocarcinoma', 'IGC'),
'HK': ('Fox Chase', 'Brain Lower Grade Glioma', 'IGC'),
'HL': ('Fox Chase', 'Head and Neck squamous cell carcinoma', 'IGC'),
'HM': ('Christiana Healthcare', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'HN': ('Ontario Institute for Cancer Research (OICR)', 'Breast invasive carcinoma', 'NCH'),
'HP': ('Ontario Institute for Cancer Research (OICR)', 'Liver hepatocellular carcinoma', 'NCH'),
'HQ': ('Ontario Institute for Cancer Research (OICR)', 'Bladder Urothelial Carcinoma', 'NCH'),
'HR': ('Ontario Institute for Cancer Research (OICR)', 'Skin Cutaneous Melanoma', 'NCH'),
'HS': ('Ontario Institute for Cancer Research (OICR)', 'Sarcoma', 'NCH'),
'HT': ('Case Western - St Joes', 'Brain Lower Grade Glioma', 'IGC'),
'HU': ('National Cancer Center Korea', 'Stomach adenocarcinoma', 'IGC'),
'HV': ('National Cancer Center Korea', 'Pancreatic adenocarcinoma', 'IGC'),
'HW': ('MSKCC', 'Brain Lower Grade Glioma', 'IGC'),
'HZ': ('International Genomics Consortium', 'Pancreatic adenocarcinoma', 'IGC'),
'IA': ('Cleveland Clinic', 'Kidney renal papillary cell carcinoma', 'IGC'),
'IB': ('Alberta Health Services', 'Pancreatic adenocarcinoma', 'IGC'),
'IC': ('University of Pittsburgh', 'Esophageal carcinoma ', 'NCH'),
'IE': ('ABS - IUPUI', 'Sarcoma', 'NCH'),
'IF': ('University of Texas MD Anderson Cancer Center', 'Sarcoma', 'NCH'),
'IG': ('Asterand', 'Esophageal carcinoma ', 'NCH'),
'IH': ('University of Miami', 'Skin Cutaneous Melanoma', 'NCH'),
'IJ': ('Christiana Healthcare', 'Acute Myeloid Leukemia', 'NCH'),
'IK': ('Christiana Healthcare', 'Brain Lower Grade Glioma', 'IGC'),
'IM': ('University of Miami', 'Thyroid carcinoma', 'NCH'),
'IN': ('University of Pittsburgh', 'Stomach adenocarcinoma', 'IGC'),
'IP': ('ABS - IUPUI', 'Stomach adenocarcinoma', 'IGC'),
'IQ': ('University of Miami', 'Head and Neck squamous cell carcinoma', 'IGC'),
'IR': ('Memorial Sloan Kettering', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'IS': ('Memorial Sloan Kettering', 'Sarcoma', 'NCH'),
'IW': ('Cedars Sinai', 'Sarcoma', 'NCH'),
'IZ': ('ABS - Lahey Clinic', 'Kidney renal papillary cell carcinoma', 'IGC'),
'J1': ('ABS - Lahey Clinic', 'Lung squamous cell carcinoma', 'IGC'),
'J2': ('ABS - Lahey Clinic', 'Lung adenocarcinoma', 'IGC'),
'J4': ('ABS - Lahey Clinic', 'Prostate adenocarcinoma', 'IGC'),
'J7': ('ILSBio', 'Kidney renal papillary cell carcinoma', 'IGC'),
'J8': ('Mayo Clinic', 'Thyroid carcinoma', 'NCH'),
'J9': ('Melbourne Health', 'Prostate adenocarcinoma', 'IGC'),
'JA': ('ABS - Research Metrics Pakistan', 'Head and Neck squamous cell carcinoma', 'IGC'),
'JL': ('ABS - Research Metrics Pakistan', 'Breast invasive carcinoma', 'NCH'),
'JU': ('BLN - Baylor', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'JV': ('BLN - Baylor', 'Sarcoma', 'NCH'),
'JW': ('BLN - Baylor', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'JX': ('Washington University', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'JY': ('University Health Network', 'Esophageal carcinoma ', 'NCH'),
'JZ': ('University of Rochester', 'Esophageal carcinoma ', 'NCH'),
'K1': ('University of Pittsburgh', 'Sarcoma', 'NCH'),
'K4': ('ABS - Lahey Clinic', 'Bladder Urothelial Carcinoma', 'NCH'),
'K6': ('ABS - Lahey Clinic', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'K7': ('ABS - Lahey Clinic', 'Liver hepatocellular carcinoma', 'NCH'),
'K8': ('ABS - Lahey Clinic', 'Skin Cutaneous Melanoma', 'NCH'),
'KA': ('ABS - Lahey Clinic', 'Esophageal carcinoma ', 'NCH'),
'KB': ('University Health Network, Toronto', 'Stomach adenocarcinoma', 'IGC'),
'KC': ('Cornell Medical College', 'Prostate adenocarcinoma', 'IGC'),
'KD': ('Mount Sinai School of Medicine', 'Sarcoma', 'NCH'),
'KE': ('Mount Sinai School of Medicine', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'KF': ('Christiana Healthcare', 'Sarcoma', 'NCH'),
'KG': ('Baylor Network', 'Pancreatic adenocarcinoma', 'IGC'),
'KH': ('Memorial Sloan Kettering', 'Esophageal carcinoma ', 'NCH'),
'KJ': ('University of Miami', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'KK': ('MD Anderson Cancer Center', 'Prostate adenocarcinoma', 'IGC'),
'KL': ('MSKCC', 'Kidney Chromophobe', 'IGC'),
'KM': ('NCI Urologic Oncology Branch', 'Kidney Chromophobe', 'IGC'),
'KN': ('Harvard', 'Kidney Chromophobe', 'IGC'),
'KO': ('MD Anderson Cancer Center', 'Kidney Chromophobe', 'IGC'),
'KP': ('British Columbia Cancer Agency', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'KQ': ('Cornell Medical College', 'Bladder Urothelial Carcinoma', 'NCH'),
'KR': ('University Of Michigan', 'Liver hepatocellular carcinoma', 'NCH'),
'KS': ('University Of Michigan', 'Thyroid carcinoma', 'NCH'),
'KT': ('Hartford', 'Brain Lower Grade Glioma', 'IGC'),
'KU': ('Hartford', 'Head and Neck squamous cell carcinoma', 'IGC'),
'KV': ('Hartford', 'Kidney renal papillary cell carcinoma', 'IGC'),
'KZ': ('Hartford', 'Stomach adenocarcinoma', 'IGC'),
'L1': ('Hartford', 'Pancreatic adenocarcinoma', 'IGC'),
'L3': ('Gundersen Lutheran Health System', 'Lung squamous cell carcinoma', 'IGC'),
'L4': ('Gundersen Lutheran Health System', 'Lung adenocarcinoma', 'IGC'),
'L5': ('University of Michigan', 'Esophageal carcinoma ', 'NCH'),
'L6': ('National Institutes of Health', 'Thyroid carcinoma', 'NCH'),
'L7': ('Christiana Care', 'Esophageal carcinoma ', 'NCH'),
'L8': ('University of Miami', 'Kidney renal papillary cell carcinoma', 'NCH'),
'L9': ('Candler', 'Lung adenocarcinoma', 'IGC'),
'LA': ('Candler', 'Lung squamous cell carcinoma', 'IGC'),
'LB': ('Candler', 'Pancreatic adenocarcinoma', 'IGC'),
'LC': ('Hartford Hospital', 'Bladder Urothelial Carcinoma', 'NCH'),
'LD': ('Hartford Hospital', 'Breast invasive carcinoma', 'NCH'),
'LG': ('Hartford Hospital', 'Liver hepatocellular carcinoma', 'NCH'),
'LH': ('Hartford Hospital', 'Skin Cutaneous Melanoma', 'NCH'),
'LI': ('Hartford Hospital', 'Sarcoma', 'NCH'),
'LK': ('University of Pittsburgh', 'Mesothelioma', 'NCH'),
'LL': ('Candler', 'Breast invasive carcinoma', 'NCH'),
'LN': ('ILSBIO', 'Esophageal carcinoma ', 'NCH'),
'LP': ('ILSBIO', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'LQ': ('Gundersen Lutheran Health System', 'Breast invasive carcinoma', 'NCH'),
'LS': ('Gundersen Lutheran Health System', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'LT': ('Gundersen Lutheran Health System', 'Bladder Urothelial Carcinoma', 'NCH'),
'M7': ('University of North Carolina', 'Prostate adenocarcinoma', 'NCH'),
'M8': ('Ontario Institute for Cancer Research (OICR)', 'Pancreatic adenocarcinoma', 'NCH'),
'M9': ('Ontario Institute for Cancer Research (OICR)', 'Esophageal carcinoma ', 'NCH'),
'MA': ('MD Anderson Cancer Center', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'MB': ('University of Minnesota', 'Sarcoma', 'NCH'),
'ME': ('University of Minnesota', 'Lung adenocarcinoma', 'NCH'),
'MF': ('University of Minnesota', 'Lung squamous cell carcinoma', 'NCH'),
'MG': ('BLN - Baylor', 'Prostate adenocarcinoma', 'NCH'),
'MH': ('BLN - Baylor', 'Kidney renal papillary cell carcinoma', 'NCH'),
'MI': ('BLN - Baylor', 'Liver hepatocellular carcinoma', 'NCH'),
'MJ': ('BLN - Baylor', 'Sarcoma', 'NCH'),
'MK': ('BLN - Baylor', 'Thyroid carcinoma', 'NCH'),
'ML': ('BLN - Baylor', 'Lung squamous cell carcinoma', 'NCH'),
'MM': ('BLN - Baylor', 'Kidney renal clear cell carcinoma', 'NCH'),
'MN': ('BLN - Baylor', 'Lung adenocarcinoma', 'NCH'),
'MO': ('ILSBio', 'Sarcoma', 'NCH'),
'MP': ('Washington University - Mayo Clinic', 'Lung adenocarcinoma', 'NCH'),
'MQ': ('Washington University - NYU', 'Mesothelioma', 'NCH'),
'MR': ('University of Minnesota', 'Liver hepatocellular carcinoma', 'NCH'),
'MS': ('University of Minnesota', 'Breast invasive carcinoma', 'NCH'),
'MT': ('University of Minnesota', 'Head and Neck squamous cell carcinoma', 'NCH'),
'MU': ('University of Minnesota', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'MV': ('University of Minnesota', 'Bladder Urothelial Carcinoma', 'NCH'),
'MW': ('University of Miami', 'Kidney renal clear cell carcinoma', 'NCH'),
'MX': ('MSKCC', 'Stomach adenocarcinoma', 'NCH'),
'MY': ('Montefiore Medical Center', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'MZ': ('Montefiore Medical Center', 'Head and Neck squamous cell carcinoma', 'NCH'),
'N1': ('Montefiore Medical Center', 'Sarcoma', 'NCH'),
'N5': ('MSKCC', 'Uterine Carcinosarcoma', 'NCH'),
'N6': ('University of Pittsburgh', 'Uterine Carcinosarcoma', 'NCH'),
'N7': ('Washington University', 'Uterine Carcinosarcoma', 'NCH'),
'N8': ('University of North Carolina', 'Uterine Carcinosarcoma', 'NCH'),
'N9': ('MD Anderson', 'Uterine Carcinosarcoma', 'NCH'),
'NA': ('Duke University', 'Uterine Carcinosarcoma', 'NCH'),
'NB': ('Washington University - CHUV', 'Lung adenocarcinoma', 'NCH'),
'NC': ('Washington University - CHUV', 'Lung squamous cell carcinoma', 'NCH'),
'ND': ('Cedars Sinai', 'Uterine Carcinosarcoma', 'NCH'),
'NF': ('Mayo Clinic - Rochester', 'Uterine Carcinosarcoma', 'NCH'),
'NG': ('Roswell Park', 'Uterine Carcinosarcoma', 'NCH'),
'NH': ('Candler', 'Colon adenocarcinoma', 'NCH'),
'NI': ('Roswell Park', 'Liver hepatocellular carcinoma', 'NCH'),
'NJ': ('Washington University - Rush University', 'Lung adenocarcinoma', 'NCH'),
'NK': ('Washington University - Rush University', 'Lung squamous cell carcinoma', 'NCH'),
'NM': ('Cambridge BioSource', 'Head and Neck squamous cell carcinoma', 'NCH'),
'NP': ('International Genomics Consortium', 'Kidney Chromophobe', 'NCH'),
'NQ': ('International Genomics Consortium', 'Mesothelioma', 'NCH'),
'NS': ('Gundersen Lutheran Health System', 'Skin Cutaneous Melanoma', 'NCH'),
'O1': ('Washington University - CALGB', 'Lung adenocarcinoma', 'NCH'),
'O2': ('Washington University - CALGB', 'Lung squamous cell carcinoma', 'NCH'),
'O8': ('Saint Mary\'s Health Care', 'Liver hepatocellular carcinoma', 'NCH'),
'O9': ('Saint Mary\'s Health Care', 'Kidney renal papillary cell carcinoma', 'NCH'),
'OC': ('Saint Mary\'s Health Care', 'Lung squamous cell carcinoma', 'NCH'),
'OD': ('Saint Mary\'s Health Care', 'Skin Cutaneous Melanoma', 'NCH'),
'OE': ('Saint Mary\'s Health Care', 'Pancreatic adenocarcinoma', 'NCH'),
'OJ': ('Saint Mary\'s Health Care', 'Thyroid carcinoma', 'NCH'),
'OK': ('Mount Sinai School of Medicine', 'Breast invasive carcinoma', 'NCH'),
'OL': ('University of Chicago', 'Breast invasive carcinoma', 'NCH'),
'OR': ('University of Michigan', 'Adrenocortical carcinoma', 'NCH'),
'OU': ('Roswell Park', 'Adrenocortical carcinoma', 'NCH'),
'OW': ('International Genomics Consortium', 'Miscellaneous', 'NCH'),
'OX': ('University of North Carolina', 'Glioblastoma multiforme', 'NCH'),
'OY': ('University of North Carolina', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'P3': ('Fred Hutchinson', 'Head and Neck squamous cell carcinoma', 'NCH'),
'P4': ('MD Anderson Cancer Center', 'Kidney renal papillary cell carcinoma', 'NCH'),
'P5': ('Cureline', 'Brain Lower Grade Glioma', 'NCH'),
'P6': ('Translational Genomics Research Institute', 'Adrenocortical carcinoma', 'NCH'),
'P7': ('Translational Genomics Research Institute', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'P8': ('University of Pittsburgh', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'P9': ('University of Minnesota', 'Pancreatic adenocarcinoma', 'NCH'),
'PA': ('University of Minnesota', 'Adrenocortical carcinoma', 'NCH'),
'PB': ('University of Minnesota', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'NCH'),
'PC': ('Fox Chase', 'Sarcoma', 'NCH'),
'PD': ('Fox Chase', 'Liver hepatocellular carcinoma', 'NCH'),
'PE': ('Fox Chase', 'Breast invasive carcinoma', 'NCH'),
'PG': ('Montefiore Medical Center', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'PH': ('Gundersen Lutheran', 'Acute Myeloid Leukemia', 'NCH'),
'PJ': ('Gundersen Lutheran', 'Kidney renal papillary cell carcinoma', 'NCH'),
'PK': ('University Health Network', 'Adrenocortical carcinoma', 'NCH'),
'PL': ('Institute of Human Virology Nigeria', 'Breast invasive carcinoma', 'NCH'),
'PN': ('Institute of Human Virology Nigeria', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'PQ': ('University of Colorado Denver', 'Bladder Urothelial Carcinoma', 'NCH'),
'PR': ('Roswell Park', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'PT': ('Maine Medical Center', 'Sarcoma', 'NCH'),
'PZ': ('ABS - Lahey Clinic', 'Pancreatic adenocarcinoma', 'NCH'),
'Q1': ('University of Oklahoma HSC', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'Q2': ('University of Oklahoma HSC', 'Kidney renal papillary cell carcinoma', 'NCH'),
'Q3': ('University of Oklahoma HSC', 'Pancreatic adenocarcinoma', 'NCH'),
'Q4': ('Emory University', 'Acute Myeloid Leukemia', 'NCH'),
'Q9': ('Emory University', 'Esophageal carcinoma ', 'NCH'),
'QA': ('Emory University', 'Liver hepatocellular carcinoma', 'NCH'),
'QB': ('Emory University', 'Skin Cutaneous Melanoma', 'NCH'),
'QC': ('Emory University', 'Sarcoma', 'NCH'),
'QD': ('Emory University', 'Thyroid carcinoma', 'NCH'),
'QF': ('BLN - Baylor', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'QG': ('BLN - Baylor', 'Colon adenocarcinoma', 'NCH'),
'QH': ('Fondazione-Besta', 'Brain Lower Grade Glioma', 'NCH'),
'QJ': ('Mount Sinai School of Medicine', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'QK': ('Emory University - Winship Cancer Inst.', 'Head and Neck squamous cell carcinoma', 'NCH'),
'QL': ('University of Chicago', 'Colon adenocarcinoma', 'NCH'),
'QM': ('University of Oklahoma HSC', 'Uterine Carcinosarcoma', 'NCH'),
'QN': ('ILSBio', 'Uterine Carcinosarcoma', 'NCH'),
'QQ': ('Roswell Park', 'Sarcoma', 'NCH'),
'QR': ('National Institutes of Health', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'QS': ('Candler', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'QT': ('University of North Carolina', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'QU': ('Harvard Beth Israel', 'Prostate adenocarcinoma', 'NCH'),
'QV': ('Instituto Nacional de Cancerologia', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'QW': ('Instituto Nacional de Cancerologia', 'Stomach adenocarcinoma', 'NCH'),
'R1': ('CHI-Penrose Colorado', 'Colon adenocarcinoma', 'NCH'),
'R2': ('CHI-Penrose Colorado', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'R3': ('CHI-Penrose Colorado', 'Bladder Urothelial Carcinoma', 'NCH'),
'R5': ('MD Anderson Cancer Center', 'Stomach adenocarcinoma', 'NCH'),
'R6': ('MD Anderson Cancer Center', 'Esophageal carcinoma ', 'NCH'),
'R7': ('Gundersen Lutheran Health System', 'Head and Neck squamous cell carcinoma', 'NCH'),
'R8': ('MD Anderson', 'Brain Lower Grade Glioma', 'NCH'),
'R9': ('Candler', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'RA': ('Candler', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'RB': ('Emory University', 'Pancreatic adenocarcinoma', 'NCH'),
'RC': ('University of Utah', 'Liver hepatocellular carcinoma', 'NCH'),
'RD': ('Peter MacCallum Cancer Center', 'Stomach adenocarcinoma', 'NCH'),
'RE': ('Peter MacCallum Cancer Center', 'Esophageal carcinoma ', 'NCH'),
'RG': ('Montefiore Medical Center', 'Liver hepatocellular carcinoma', 'NCH'),
'RH': ('BLN - Baylor', 'Head and Neck squamous cell carcinoma', 'NCH'),
'RL': ('St. Joseph\'s Hospital AZ', 'Pancreatic adenocarcinoma', 'NCH'),
'RM': ('St. Joseph\'s Hospital AZ', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'RN': ('St. Joseph\'s Hospital AZ', 'Sarcoma', 'NCH'),
'RP': ('St. Joseph\'s Hospital AZ', 'Skin Cutaneous Melanoma', 'NCH'),
'RQ': ('St. Joseph\'s Hospital AZ', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'NCH'),
'RR': ('St. Joseph\'s Hospital AZ', 'Glioblastoma multiforme', 'NCH'),
'RS': ('Memorial Sloan Kettering Cancer Center', 'Head and Neck squamous cell carcinoma', 'NCH'),
'RT': ('Cleveland Clinic Foundation', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'RU': ('Northwestern University', 'Colon adenocarcinoma', 'NCH'),
'RV': ('Northwestern University', 'Pancreatic adenocarcinoma', 'NCH'),
'RW': ('Michigan University', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'RX': ('University of Minnesota', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'RY': ('University of California San Francisco', 'Brain Lower Grade Glioma', 'NCH'),
'RZ': ('Wills Eye Institute', 'Uveal Melanoma', 'NCH'),
'S2': ('Albert Einstein Medical Center', 'Lung adenocarcinoma', 'NCH'),
'S3': ('Albert Einstein Medical Center', 'Breast invasive carcinoma', 'NCH'),
'S4': ('University of Chicago', 'Pancreatic adenocarcinoma', 'NCH'),
'S5': ('University of Oklahoma HSC', 'Bladder Urothelial Carcinoma', 'NCH'),
'S6': ('Gundersen Lutheran Health System', 'Testicular Germ Cell Tumors', 'NCH'),
'S7': ('University Hospital Motol', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'S8': ('ABS - IUPUI', 'Esophageal carcinoma ', 'NCH'),
'S9': ('Dept of Neurosurgery at University of Heidelberg', 'Brain Lower Grade Glioma', 'NCH'),
'SA': ('ABS - IUPUI', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SB': ('Baylor College of Medicine', 'Testicular Germ Cell Tumors', 'NCH'),
'SC': ('Memorial Sloan Kettering', 'Mesothelioma', 'NCH'),
'SD': ('MD Anderson', 'Pancreatic adenocarcinoma', 'NCH'),
'SE': ('Boston Medical Center', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SG': ('Cleveland Clinic Foundation', 'Sarcoma', 'NCH'),
'SH': ('Papworth Hospital', 'Mesothelioma', 'NCH'),
'SI': ('Washington University St. Louis', 'Sarcoma', 'NCH'),
'SJ': ('Albert Einstein Medical Center', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'SK': ('St. Joseph\'s Hospital AZ', 'Colon adenocarcinoma', 'NCH'),
'SL': ('St. Joseph\'s Hospital AZ', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'SN': ('BLN - Baylor', 'Testicular Germ Cell Tumors', 'NCH'),
'SO': ('University of Minnesota', 'Testicular Germ Cell Tumors', 'NCH'),
'SP': ('University Health Network', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SQ': ('International Genomics Consortium', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SR': ('Tufts Medical Center', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SS': ('Medical College of Georgia', 'Colon adenocarcinoma', 'NCH'),
'ST': ('Global Bioclinical-Moldova', 'Head and Neck squamous cell carcinoma', 'NCH'),
'SU': ('Global Bioclinical-Moldova', 'Prostate adenocarcinoma', 'NCH'),
'SW': ('Global Bioclinical-Moldova', 'Stomach adenocarcinoma', 'NCH'),
'SX': ('Mayo Clinic Arizona', 'Kidney renal papillary cell carcinoma', 'NCH'),
'SY': ('Mayo Clinic Arizona', 'Bladder Urothelial Carcinoma', 'NCH'),
'T1': ('St. Joseph\'s Hospital Arizona', 'Liver hepatocellular carcinoma', 'NCH'),
'T2': ('St. University of Colorado Denver', 'Head and Neck squamous cell carcinoma', 'NCH'),
'T3': ('Molecular Response', 'Head and Neck squamous cell carcinoma', 'NCH'),
'T6': ('Molecular Response', 'Lung adenocarcinoma', 'NCH'),
'T7': ('Molecular Response', 'Kidney renal clear cell carcinoma', 'NCH'),
'T9': ('Molecular Response', 'Colon adenocarcinoma', 'NCH'),
'TE': ('Global BioClinical - Georgia', 'Skin Cutaneous Melanoma', 'NCH'),
'TG': ('Global BioClinical - Georgia', 'Head and Neck squamous cell carcinoma', 'NCH'),
'TK': ('Global BioClinical - Georgia', 'Prostate adenocarcinoma', 'NCH'),
'TL': ('Global BioClinical - Georgia', 'Stomach adenocarcinoma', 'NCH'),
'TM': ('The University of New South Wales', 'Brain Lower Grade Glioma', 'NCH'),
'TN': ('Ohio State University', 'Head and Neck squamous cell carcinoma', 'NCH'),
'TP': ('Maine Medical Center', 'Prostate adenocarcinoma', 'NCH'),
'TQ': ('University of Sao Paulo', 'Brain Lower Grade Glioma', 'NCH'),
'TR': ('Global Bioclinical-Moldova', 'Skin Cutaneous Melanoma', 'NCH'),
'TS': ('University of Pennsylvania', 'Mesothelioma', 'NCH'),
'TT': ('University of Pennsylvania', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'TV': ('Wake Forest University', 'Breast invasive carcinoma', 'NCH'),
'UB': ('UCSF', 'Liver hepatocellular carcinoma', 'NCH'),
'UC': ('University of Washington', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'UD': ('University of Western Australia', 'Mesothelioma', 'NCH'),
'UE': ('Asterand', 'Sarcoma', 'NCH'),
'UF': ('Barretos Cancer Hospital', 'Head and Neck squamous cell carcinoma', 'NCH'),
'UJ': ('Boston Medical Center', 'Lung squamous cell carcinoma', 'NCH'),
'UL': ('Boston Medical Center', 'Breast invasive carcinoma', 'NCH'),
'UN': ('Boston Medical Center', 'Kidney renal papillary cell carcinoma', 'NCH'),
'UP': ('Boston Medical Center', 'Head and Neck squamous cell carcinoma', 'NCH'),
'UR': ('Boston Medical Center', 'Prostate adenocarcinoma', 'NCH'),
'US': ('Garvan Institute of Medical Research', 'Pancreatic adenocarcinoma', 'NCH'),
'UT': ('Asbestos Diseases Research Institute', 'Mesothelioma', 'NCH'),
'UU': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Breast invasive carcinoma', 'NCH'),
'UV': ('Capital Biosciences', 'Liver hepatocellular carcinoma', 'NCH'),
'UW': ('University of North Carolina', 'Kidney Chromophobe', 'NCH'),
'UY': ('University of California San Francisco', 'Bladder Urothelial Carcinoma', 'NCH'),
'UZ': ('University of California San Francisco', 'Kidney renal papillary cell carcinoma', 'NCH'),
'V1': ('University of California San Francisco', 'Prostate adenocarcinoma', 'NCH'),
'V2': ('Cleveland Clinic Foundation', 'Prostate adenocarcinoma', 'NCH'),
'V3': ('Cleveland Clinic Foundation', 'Uveal Melanoma', 'NCH'),
'V4': ('Institut Curie', 'Uveal Melanoma', 'NCH'),
'V5': ('Duke University', 'Esophageal carcinoma ', 'NCH'),
'V6': ('Duke University', 'Stomach adenocarcinoma', 'NCH'),
'V7': ('Medical College of Georgia', 'Breast invasive carcinoma', 'NCH'),
'V8': ('Medical College of Georgia', 'Kidney renal clear cell carcinoma', 'NCH'),
'V9': ('Medical College of Georgia', 'Kidney renal papillary cell carcinoma', 'NCH'),
'VA': ('Alliance', 'Stomach adenocarcinoma', 'NCH'),
'VB': ('Global BioClinical - Georgia', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'NCH'),
'VD': ('University of Liverpool', 'Uveal Melanoma', 'NCH'),
'VF': ('University of Pennsylvania', 'Testicular Germ Cell Tumors', 'NCH'),
'VG': ('Institute of Human Virology Nigeria', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'VK': ('Institute of Human Virology Nigeria', 'Colon adenocarcinoma', 'NCH'),
'VL': ('Institute of Human Virology Nigeria', 'Rectum adenocarcinoma', 'NCH'),
'VM': ('Huntsman Cancer Institute', 'Brain Lower Grade Glioma', 'NCH'),
'VN': ('NCI Urologic Oncology Branch', 'Prostate adenocarcinoma', 'NCH'),
'VP': ('Washington University', 'Prostate adenocarcinoma', 'NCH'),
'VQ': ('Barretos Cancer Hospital', 'Stomach adenocarcinoma', 'NCH'),
'VR': ('Barretos Cancer Hospital', 'Esophageal carcinoma ', 'NCH'),
'VS': ('Barretos Cancer Hospital', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'VT': ('Vanderbilt', 'Sarcoma', 'NCH'),
'VV': ('John Wayne Cancer Center', 'Brain Lower Grade Glioma', 'NCH'),
'VW': ('Northwestern University', 'Brain Lower Grade Glioma', 'NCH'),
'VX': ('Northwestern University', 'Stomach adenocarcinoma', 'NCH'),
'VZ': ('Albert Einstein Medical Center', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'W2': ('Medical College of Wisconsin', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'W3': ('John Wayne Cancer Center', 'Skin Cutaneous Melanoma', 'NCH'),
'W4': ('University of North Carolina', 'Testicular Germ Cell Tumors', 'NCH'),
'W5': ('Mayo Clinic Rochester', 'Cholangiocarcinoma', 'NCH'),
'W6': ('UCSF', 'Cholangiocarcinoma', 'NCH'),
'W7': ('Garvan Institute of Medical Research', 'Cholangiocarcinoma', 'NCH'),
'W8': ('Greenville Health System', 'Breast invasive carcinoma', 'NCH'),
'W9': ('University of Kansas', 'Brain Lower Grade Glioma', 'NCH'),
'WA': ('University of Schleswig-Holstein', 'Head and Neck squamous cell carcinoma', 'NCH'),
'WB': ('Erasmus MC', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'WC': ('MD Anderson', 'Uveal Melanoma', 'NCH'),
'WD': ('Emory University', 'Cholangiocarcinoma', 'NCH'),
'WE': ('Norfolk and Norwich Hospital', 'Skin Cutaneous Melanoma', 'NCH'),
'WF': ('Greenville Health System', 'Pancreatic adenocarcinoma', 'NCH'),
'WG': ('Greenville Health System', 'Lung squamous cell carcinoma', 'NCH'),
'WH': ('Greenville Health System', 'Brain Lower Grade Glioma', 'NCH'),
'WJ': ('Greenville Health System', 'Liver hepatocellular carcinoma', 'NCH'),
'WK': ('Brigham and Women\'s Hospital', 'Sarcoma', 'NCH'),
'WL': ('University of Kansas', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'WM': ('University of Kansas', 'Kidney renal clear cell carcinoma', 'NCH'),
'WN': ('University of Kansas', 'Kidney renal papillary cell carcinoma', 'NCH'),
'WP': ('University of Kansas', 'Sarcoma', 'NCH'),
'WQ': ('University of Kansas', 'Liver hepatocellular carcinoma', 'NCH'),
'WR': ('University of Kansas', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'WS': ('University of Kansas', 'Colon adenocarcinoma', 'NCH'),
'WT': ('University of Kansas', 'Breast invasive carcinoma', 'NCH'),
'WU': ('Wake Forest University', 'Colon adenocarcinoma', 'NCH'),
'WW': ('Wake Forest University', 'Prostate adenocarcinoma', 'NCH'),
'WX': ('Yale University', 'Liver hepatocellular carcinoma', 'NCH'),
'WY': ('Johns Hopkins', 'Brain Lower Grade Glioma', 'NCH'),
'WZ': ('International Genomics Consortium', 'Testicular Germ Cell Tumors', 'NCH'),
'X2': ('University of Washington', 'Sarcoma', 'NCH'),
'X3': ('Cleveland Clinic Foundation', 'Testicular Germ Cell Tumors', 'NCH'),
'X4': ('Institute for Medical Research', 'Prostate adenocarcinoma', 'NCH'),
'X5': ('Institute of Human Virology Nigeria', 'Bladder Urothelial Carcinoma', 'NCH'),
'X6': ('University of Iowa', 'Sarcoma', 'NCH'),
'X7': ('ABS IUPUI', 'Thymoma', 'NCH'),
'X8': ('St. Joseph\'s Hospital Arizona', 'Esophageal carcinoma ', 'NCH'),
'X9': ('University of California, Davis', 'Sarcoma', 'NCH'),
'XA': ('University of Minnesota', 'Prostate adenocarcinoma', 'NCH'),
'XB': ('Albert Einstein Medical Center', 'Esophageal carcinoma ', 'NCH'),
'XC': ('Albert Einstein Medical Center', 'Lung squamous cell carcinoma', 'NCH'),
'XD': ('Providence Portland Medical Center', 'Pancreatic adenocarcinoma', 'NCH'),
'XE': ('University of Southern California', 'Testicular Germ Cell Tumors', 'NCH'),
'XF': ('University of Southern California', 'Bladder Urothelial Carcinoma', 'NCH'),
'XG': ('BLN UT Southwestern Medical Center at Dallas', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'XH': ('BLN Baylor', 'Thymoma', 'NCH'),
'XJ': ('University of Kansas', 'Prostate adenocarcinoma', 'NCH'),
'XK': ('Mayo Clinic Arizona', 'Prostate adenocarcinoma', 'NCH'),
'XM': ('MSKCC', 'Thymoma', 'NCH'),
'XN': ('University of Sao Paulo', 'Pancreatic adenocarcinoma', 'NCH'),
'XP': ('University of Sao Paulo', 'Esophageal carcinoma ', 'NCH'),
'XQ': ('University of Sao Paulo', 'Prostate adenocarcinoma', 'NCH'),
'XR': ('University of Sao Paulo', 'Liver hepatocellular carcinoma', 'NCH'),
'XS': ('University of Sao Paulo', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'XT': ('Johns Hopkins', 'Mesothelioma', 'NCH'),
'XU': ('University Health Network', 'Thymoma', 'NCH'),
'XV': ('Capital Biosciences', 'Skin Cutaneous Melanoma', 'NCH'),
'XX': ('Spectrum Health', 'Breast invasive carcinoma', 'NCH'),
'XY': ('Spectrum Health', 'Testicular Germ Cell Tumors', 'NCH'),
'Y3': ('University of New Mexico', 'Acute Myeloid Leukemia', 'NCH'),
'Y5': ('University of Arizona', 'Sarcoma', 'NCH'),
'Y6': ('University of Arizona', 'Prostate adenocarcinoma', 'NCH'),
'Y8': ('Spectrum Health', 'Kidney renal papillary cell carcinoma', 'NCH'),
'YA': ('Spectrum Health', 'Liver hepatocellular carcinoma', 'NCH'),
'YB': ('Spectrum Health', 'Pancreatic adenocarcinoma', 'NCH'),
'YC': ('Spectrum Health', 'Bladder Urothelial Carcinoma', 'NCH'),
'YD': ('Spectrum Health', 'Skin Cutaneous Melanoma', 'NCH'),
'YF': ('University of Puerto Rico', 'Bladder Urothelial Carcinoma', 'NCH'),
'YG': ('University of Puerto Rico', 'Skin Cutaneous Melanoma', 'NCH'),
'YH': ('Stanford University', 'Pancreatic adenocarcinoma', 'NCH'),
'YJ': ('Stanford University', 'Prostate adenocarcinoma', 'NCH'),
'YL': ('PROCURE Biobank', 'Prostate adenocarcinoma', 'NCH'),
'YN': ('University of Arizona', 'Skin Cutaneous Melanoma', 'NCH'),
'YR': ('Barretos Cancer Hospital', 'Cholangiocarcinoma', 'NCH'),
'YS': ('Barretos Cancer Hospital', 'Mesothelioma', 'NCH'),
'YT': ('Barretos Cancer Hospital', 'Thymoma', 'NCH'),
'YU': ('Barretos Cancer Hospital', 'Testicular Germ Cell Tumors', 'NCH'),
'YV': ('MSKCC', 'Uveal Melanoma', 'NCH'),
'YW': ('Albert Einstein Medical Center', 'Sarcoma', 'NCH'),
'YX': ('Emory University', 'Stomach adenocarcinoma', 'NCH'),
'YY': ('Roswell Park', 'Pancreatic adenocarcinoma', 'NCH'),
'YZ': ('The Ohio State University', 'Uveal Melanoma', 'NCH'),
'Z2': ('IDI-IRCCS', 'Skin Cutaneous Melanoma', 'NCH'),
'Z3': ('UCLA', 'Sarcoma', 'NCH'),
'Z4': ('Cureline', 'Sarcoma', 'NCH'),
'Z5': ('Cureline', 'Pancreatic adenocarcinoma', 'NCH'),
'Z6': ('Cureline', 'Esophageal carcinoma ', 'NCH'),
'Z7': ('John Wayne Cancer Center', 'Breast invasive carcinoma', 'NCH'),
'Z8': ('John Wayne Cancer Center', 'Pancreatic adenocarcinoma', 'NCH'),
'ZA': ('Candler', 'Stomach adenocarcinoma', 'NCH'),
'ZB': ('Thoraxklinik', 'Thymoma', 'NCH'),
'ZC': ('University of Mannheim', 'Thymoma', 'NCH'),
'ZD': ('ILSbio', 'Cholangiocarcinoma', 'NCH'),
'ZE': ('Spectrum Health', 'Lung squamous cell carcinoma', 'NCH'),
'ZF': ('University of Sheffield', 'Bladder Urothelial Carcinoma', 'NCH'),
'ZG': ('University Medical Center Hamburg-Eppendorf', 'Prostate adenocarcinoma', 'NCH'),
'ZH': ('University of North Carolina', 'Cholangiocarcinoma', 'NCH'),
'ZJ': ('NCI HRE Branch', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'ZK': ('University of New Mexico', 'Cholangiocarcinoma', 'NCH'),
'ZL': ('Valley Hospital', 'Thymoma', 'NCH'),
'ZM': ('University of Ulm', 'Testicular Germ Cell Tumors', 'NCH'),
'ZN': ('Brigham and Women\'s Hospital Division of Thoracic Surgery', 'Mesothelioma', 'NCH'),
'ZP': ('Medical College of Wisconsin', 'Liver hepatocellular carcinoma', 'NCH'),
'ZQ': ('Tayside Tissue Bank', 'Stomach adenocarcinoma', 'NCH'),
'ZR': ('Tayside Tissue Bank', 'Esophageal carcinoma ', 'NCH'),
'ZS': ('Tayside Tissue Bank', 'Liver hepatocellular carcinoma', 'NCH'),
'ZT': ('International Genomics Consortium', 'Thymoma', 'NCH'),
'ZU': ('Spectrum Health', 'Cholangiocarcinoma', 'NCH'),
'ZW': ('University of Alabama', 'Pancreatic adenocarcinoma', 'NCH'),
'ZX': ('University of Alabama', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
}
# TCGA sample-type table: two-digit code -> (definition, short letter code).
# Codes 01-09 are tumor samples, 10-14 are normals, 20 is a control analyte,
# 40-61 cover recurrent blood cancers, cell lines, and xenografts.
SAMPLE_TYPE = dict([
    # --- tumor samples ---
    ('01', ('Primary solid Tumor', 'TP')),
    ('02', ('Recurrent Solid Tumor', 'TR')),
    ('03', ('Primary Blood Derived Cancer - Peripheral Blood', 'TB')),
    ('04', ('Recurrent Blood Derived Cancer - Bone Marrow', 'TRBM')),
    ('05', ('Additional - New Primary', 'TAP')),
    ('06', ('Metastatic', 'TM')),
    ('07', ('Additional Metastatic', 'TAM')),
    ('08', ('Human Tumor Original Cells', 'THOC')),
    ('09', ('Primary Blood Derived Cancer - Bone Marrow', 'TBM')),
    # --- normal samples ---
    ('10', ('Blood Derived Normal', 'NB')),
    ('11', ('Solid Tissue Normal', 'NT')),
    ('12', ('Buccal Cell Normal', 'NBC')),
    ('13', ('EBV Immortalized Normal', 'NEBV')),
    ('14', ('Bone Marrow Normal', 'NBM')),
    # --- controls, cell lines, xenografts ---
    ('20', ('Control Analyte', 'CELLC')),
    ('40', ('Recurrent Blood Derived Cancer - Peripheral Blood', 'TRB')),
    ('50', ('Cell Lines', 'CELL')),
    ('60', ('Primary Xenograft Tissue', 'XP')),
    ('61', ('Cell Line Derived Xenograft Tissue', 'XCL')),
])
| 73.087277 | 131 | 0.609473 |
amous cell carcinoma', 'IGC'),
'34': ('University of Pittsburgh', 'Lung squamous cell carcinoma', 'IGC'),
'35': ('Cureline', 'Lung adenocarcinoma', 'IGC'),
'36': ('BC Cancer Agency', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'37': ('Cureline', 'Lung squamous cell carcinoma', 'IGC'),
'38': ('UNC', 'Lung adenocarcinoma', 'IGC'),
'39': ('MSKCC', 'Lung squamous cell carcinoma', 'IGC'),
'3A': ('Moffitt Cancer Center', 'Pancreatic adenocarcinoma', 'NCH'),
'3B': ('Moffitt Cancer Center', 'Sarcoma', 'NCH'),
'3C': ('Columbia University', 'Breast invasive carcinoma', 'NCH'),
'3E': ('Columbia University', 'Pancreatic adenocarcinoma', 'NCH'),
'3G': ('MD Anderson Cancer Center', 'Thymoma', 'NCH'),
'3H': ('MD Anderson Cancer Center', 'Mesothelioma', 'NCH'),
'3J': ('Carle Cancer Center', 'Breast invasive carcinoma', 'NCH'),
'3K': ('Boston Medical Center', 'Liver hepatocellular carcinoma', 'NCH'),
'3L': ('Albert Einstein Medical Center', 'Colon adenocarcinoma', 'NCH'),
'3M': ('University of Kansas Medical Center', 'Stomach adenocarcinoma', 'NCH'),
'3N': ('Greenville Health System', 'Skin Cutaneous Melanoma', 'NCH'),
'3P': ('Greenville Health System', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'3Q': ('Greenville Health Systems', 'Thymoma', 'NCH'),
'3R': ('University of New Mexico', 'Sarcoma', 'NCH'),
'3S': ('University of New Mexico', 'Thymoma', 'NCH'),
'3T': ('Emory University', 'Thymoma', 'NCH'),
'3U': ('University of Chicago', 'Mesothelioma', 'NCH'),
'3W': ('University of California San Diego', 'Sarcoma', 'NCH'),
'3X': ('Alberta Health Services', 'Cholangiocarcinoma', 'NCH'),
'3Z': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Kidney renal clear cell carcinoma', 'NCH'),
'41': ('Christiana Healthcare', 'Glioblastoma multiforme', 'IGC'),
'42': ('Christiana Healthcare', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'43': ('Christiana Healthcare', 'Lung squamous cell carcinoma', 'IGC'),
'44': ('Christiana Healthcare', 'Lung adenocarcinoma', 'IGC'),
'46': ('St. Joseph\'s Medical Center (MD)', 'Lung squamous cell carcinoma', 'IGC'),
'49': ('Johns Hopkins', 'Lung adenocarcinoma', 'IGC'),
'4A': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Kidney renal papillary cell carcinoma', 'NCH'),
'4B': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Lung adenocarcinoma', 'NCH'),
'4C': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Thyroid carcinoma', 'NCH'),
'4D': ('Molecular Response', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'4E': ('Molecular Response', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'4G': ('Sapienza University of Rome', 'Cholangiocarcinoma', 'NCH'),
'4H': ('Proteogenex, Inc.', 'Breast invasive carcinoma', 'NCH'),
'4J': ('Proteogenex, Inc.', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'4K': ('Proteogenex, Inc.', 'Testicular Germ Cell Tumors', 'NCH'),
'4L': ('Proteogenex, Inc.', 'Prostate adenocarcinoma', 'NCH'),
'4N': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Colon adenocarcinoma', 'NCH'),
'4P': ('Duke University', 'Head and Neck squamous cell carcinoma', 'NCH'),
'4Q': ('Duke University', 'Sarcoma', 'NCH'),
'4R': ('Duke University', 'Liver hepatocellular carcinoma', 'NCH'),
'4S': ('Duke University', 'Prostate adenocarcinoma', 'NCH'),
'4T': ('Duke University', 'Colon adenocarcinoma', 'NCH'),
'4V': ('Hospital Louis Pradel', 'Thymoma', 'NCH'),
'4W': ('University of Miami', 'Glioblastoma multiforme', 'NCH'),
'4X': ('Yale University', 'Thymoma', 'NCH'),
'4Y': ('Medical College of Wisconsin', 'Sarcoma', 'NCH'),
'4Z': ('Barretos Cancer Hospital', 'Bladder Urothelial Carcinoma', 'NCH'),
'50': ('University of Pittsburgh', 'Lung adenocarcinoma', 'IGC'),
'51': ('UNC', 'Lung squamous cell carcinoma', 'IGC'),
'52': ('University of Miami', 'Lung squamous cell carcinoma', 'IGC'),
'53': ('University of Miami', 'Lung adenocarcinoma', 'IGC'),
'55': ('International Genomics Consortium', 'Lung adenocarcinoma', 'IGC'),
'56': ('International Genomics Consortium', 'Lung squamous cell carcinoma', 'IGC'),
'57': ('International Genomics Consortium', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'58': ('Thoraxklinik at University Hospital Heidelberg', 'Lung squamous cell carcinoma', 'IGC'),
'59': ('Roswell Park', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'5A': ('Wake Forest University', 'Cholangiocarcinoma', 'NCH'),
'5B': ('Medical College of Wisconsin', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'5C': ('Cureline', 'Liver hepatocellular carcinoma', 'NCH'),
'5D': ('University of Miami', 'Sarcoma', 'NCH'),
'5F': ('Duke University', 'Thyroid carcinoma', 'NCH'),
'5G': ('Cleveland Clinic Foundation', 'Thymoma', 'NCH'),
'5H': ('Retina Consultants Houston', 'Uveal Melanoma', 'NCH'),
'5J': ('Cureline', 'Acute Myeloid Leukemia', 'NCH'),
'5K': ('St. Joseph\'s Hospital AZ', 'Thymoma', 'NCH'),
'5L': ('University of Sao Paulo', 'Breast invasive carcinoma', 'NCH'),
'5M': ('University of Sao Paulo', 'Colon adenocarcinoma', 'NCH'),
'5N': ('University Hospital Erlangen', 'Bladder Urothelial Carcinoma', 'NCH'),
'5P': ('University Hospital Erlangen', 'Kidney renal papillary cell carcinoma', 'NCH'),
'5Q': ('Proteogenex, Inc', 'Pancreatic adenocarcinoma', 'NCH'),
'5R': ('Proteogenex, Inc', 'Liver hepatocellular carcinoma', 'NCH'),
'5S': ('Holy Cross', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'5T': ('Holy Cross', 'Breast invasive carcinoma', 'NCH'),
'5U': ('Regina Elena National Cancer Institute', 'Thymoma', 'NCH'),
'5V': ('Roswell Park', 'Thymoma', 'NCH'),
'5W': ('University of Alabama', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'5X': ('University of Alabama', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'60': ('Roswell Park', 'Lung squamous cell carcinoma', 'IGC'),
'61': ('University of Pittsburgh', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'62': ('Thoraxklinik at University Hospital Heidelberg', 'Lung adenocarcinoma', 'IGC'),
'63': ('Ontario Institute for Cancer Research', 'Lung squamous cell carcinoma', 'IGC'),
'64': ('Fox Chase', 'Lung adenocarcinoma', 'IGC'),
'65': ('Roswell Park', 'Glioblastoma multiforme', 'IGC'),
'66': ('Indivumed', 'Lung squamous cell carcinoma', 'IGC'),
'67': ('St Joseph\'s Medical Center (MD)', 'Lung adenocarcinoma', 'IGC'),
'68': ('Washington University - Cleveland Clinic', 'Lung squamous cell carcinoma', 'IGC'),
'69': ('Washington University - Cleveland Clinic', 'Lung adenocarcinoma', 'IGC'),
'6A': ('University of Kansas', 'Lung squamous cell carcinoma', 'NCH'),
'6D': ('University of Oklahoma HSC', 'Kidney renal clear cell carcinoma', 'NCH'),
'6G': ('University of Sao Paulo', 'Rectum adenocarcinoma', 'NCH'),
'70': ('ILSBio', 'Lung squamous cell carcinoma', 'IGC'),
'71': ('ILSBio', 'Lung adenocarcinoma', 'IGC'),
'72': ('NCH', 'Ovarian serous cystadenocarcinoma', 'IGC'),
'73': ('Roswell Park', 'Lung adenocarcinoma', 'IGC'),
'74': ('Swedish Neurosciences', 'Glioblastoma multiforme', 'IGC'),
'75': ('Ontario Institute for Cancer Research (OICR)', 'Lung adenocarcinoma', 'IGC'),
'76': ('Thomas Jefferson University', 'Glioblastoma multiforme', 'IGC'),
'77': ('Prince Charles Hospital', 'Lung squamous cell carcinoma', 'IGC'),
'78': ('Prince Charles Hospital', 'Lung adenocarcinoma', 'IGC'),
'79': ('Ontario Institute for Cancer Research (OICR)/Ottawa', 'Lung squamous cell carcinoma', 'IGC'),
'80': ('Ontario Institute for Cancer Research (OICR)/Ottawa', 'Lung adenocarcinoma', 'IGC'),
'81': ('CHI-Penrose Colorado', 'Glioblastoma multiforme', 'IGC'),
'82': ('CHI-Penrose Colorado', 'Lung squamous cell carcinoma', 'IGC'),
'83': ('CHI-Penrose Colorado', 'Lung adenocarcinoma', 'IGC'),
'85': ('Asterand', 'Lung squamous cell carcinoma', 'IGC'),
'86': ('Asterand', 'Lung adenocarcinoma', 'IGC'),
'87': ('International Genomics Consortium', 'Glioblastoma multiforme', 'IGC'),
'90': ('ABS - IUPUI', 'Lung squamous cell carcinoma', 'IGC'),
'91': ('ABS - IUPUI', 'Lung adenocarcinoma', 'IGC'),
'92': ('Washington University - St. Louis', 'Lung squamous cell carcinoma', 'IGC'),
'93': ('Washington University - St. Louis', 'Lung adenocarcinoma', 'IGC'),
'94': ('Washington University - Emory', 'Lung squamous cell carcinoma', 'IGC'),
'95': ('Washington University - Emory', 'Lung adenocarcinoma', 'IGC'),
'96': ('Washington University - NYU', 'Lung squamous cell carcinoma', 'IGC'),
'97': ('Washington University - NYU', 'Lung adenocarcinoma', 'IGC'),
'98': ('Washington University - Alabama', 'Lung squamous cell carcinoma', 'IGC'),
'99': ('Washington University - Alabama', 'Lung adenocarcinoma', 'IGC'),
'A1': ('UCSF', 'Breast invasive carcinoma', 'NCH'),
'A2': ('Walter Reed', 'Breast invasive carcinoma', 'NCH'),
'A3': ('International Genomics Consortium', 'Kidney renal clear cell carcinoma', 'IGC'),
'A4': ('International Genomics Consortium', 'Kidney renal papillary cell carcinoma', 'IGC'),
'A5': ('Cedars Sinai', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'A6': ('Christiana Healthcare', 'Colon adenocarcinoma', 'IGC'),
'A7': ('Christiana Healthcare', 'Breast invasive carcinoma', 'NCH'),
'A8': ('Indivumed', 'Breast invasive carcinoma', 'NCH'),
'AA': ('Indivumed', 'Colon adenocarcinoma', 'IGC'),
'AB': ('Washington University', 'Acute Myeloid Leukemia', 'NCH'),
'AC': ('International Genomics Consortium', 'Breast invasive carcinoma', 'NCH'),
'AD': ('International Genomics Consortium', 'Colon adenocarcinoma', 'IGC'),
'AF': ('Christiana Healthcare', 'Rectum adenocarcinoma', 'IGC'),
'AG': ('Indivumed', 'Rectum adenocarcinoma', 'IGC'),
'AH': ('International Genomics Consortium', 'Rectum adenocarcinoma', 'IGC'),
'AJ': ('International Genomics Conosrtium', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'AK': ('Fox Chase', 'Kidney renal clear cell carcinoma', 'IGC'),
'AL': ('Fox Chase', 'Kidney renal papillary cell carcinoma', 'IGC'),
'AM': ('Cureline', 'Colon adenocarcinoma', 'IGC'),
'AN': ('Cureline', 'Breast invasive carcinoma', 'NCH'),
'AO': ('MSKCC', 'Breast invasive carcinoma', 'NCH'),
'AP': ('MSKCC', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'AQ': ('UNC ', 'Breast invasive carcinoma', 'NCH'),
'AR': ('Mayo', 'Breast invasive carcinoma', 'NCH'),
'AS': ('St. Joseph\'s Medical Center-(MD)', 'Kidney renal clear cell carcinoma', 'IGC'),
'AT': ('St. Joseph\'s Medical Center-(MD)', 'Kidney renal papillary cell carcinoma', 'IGC'),
'AU': ('St. Joseph\'s Medical Center-(MD)', 'Colon adenocarcinoma', 'IGC'),
'AV': ('NCH', 'Cell Line Control', 'NCH'),
'AW': ('Cureline', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'AX': ('Gynecologic Oncology Group', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'AY': ('UNC', 'Colon adenocarcinoma', 'IGC'),
'AZ': ('University of Pittsburgh', 'Colon adenocarcinoma', 'IGC'),
'B0': ('University of Pittsburgh', 'Kidney renal clear cell carcinoma', 'IGC'),
'B1': ('University of Pittsburgh', 'Kidney renal papillary cell carcinoma', 'IGC'),
'B2': ('Christiana Healthcare', 'Kidney renal clear cell carcinoma', 'IGC'),
'B3': ('Christiana Healthcare', 'Kidney renal papillary cell carcinoma', 'IGC'),
'B4': ('Cureline', 'Kidney renal clear cell carcinoma', 'IGC'),
'B5': ('Duke', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'B6': ('Duke', 'Breast invasive carcinoma', 'NCH'),
'B7': ('Cureline', 'Stomach adenocarcinoma', 'IGC'),
'B8': ('UNC', 'Kidney renal clear cell carcinoma', 'IGC'),
'B9': ('UNC', 'Kidney renal papillary cell carcinoma', 'IGC'),
'BA': ('UNC', 'Head and Neck squamous cell carcinoma', 'IGC'),
'BB': ('Johns Hopkins', 'Head and Neck squamous cell carcinoma', 'IGC'),
'BC': ('UNC', 'Liver hepatocellular carcinoma', 'NCH'),
'BD': ('University of Pittsburgh', 'Liver hepatocellular carcinoma', 'NCH'),
'BF': ('Cureline', 'Skin Cutaneous Melanoma', 'NCH'),
'BG': ('University of Pittsburgh', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'BH': ('University of Pittsburgh', 'Breast invasive carcinoma', 'NCH'),
'BI': ('University of Pittsburgh', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'BJ': ('University of Pittsburgh', 'Thyroid carcinoma', 'IGC'),
'BK': ('Christiana Healthcare', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'BL': ('Christiana Healthcare', 'Bladder Urothelial Carcinoma', 'NCH'),
'BM': ('UNC', 'Rectum adenocarcinoma', 'IGC'),
'BP': ('MSKCC', 'Kidney renal clear cell carcinoma', 'IGC'),
'BQ': ('MSKCC', 'Kidney renal papillary cell carcinoma', 'IGC'),
'BR': ('Asterand', 'Stomach adenocarcinoma', 'IGC'),
'BS': ('University of Hawaii', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'BT': ('University of Pittsburgh', 'Bladder Urothelial Carcinoma', 'NCH'),
'BW': ('St. Joseph\'s Medical Center-(MD)', 'Liver hepatocellular carcinoma', 'NCH'),
'C4': ('Indivumed', 'Bladder Urothelial Carcinoma', 'NCH'),
'C5': ('Medical College of Wisconsin', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'C8': ('ILSBio', 'Breast invasive carcinoma', 'NCH'),
'C9': ('ILSBio', 'Head and Neck squamous cell carcinoma', 'NCH'),
'CA': ('ILSBio', 'Colon adenocarcinoma', 'IGC'),
'CB': ('ILSBio', 'Kidney renal clear cell carcinoma', 'IGC'),
'CC': ('ILSBio', 'Liver hepatocellular carcinoma', 'NCH'),
'CD': ('ILSBio', 'Stomach adenocarcinoma', 'IGC'),
'CE': ('ILSBio', 'Thyroid carcinoma', 'IGC'),
'CF': ('ILSBio', 'Bladder Urothelial Carcinoma', 'NCH'),
'CG': ('Indivumed', 'Stomach adenocarcinoma', 'IGC'),
'CH': ('Indivumed', 'Prostate adenocarcinoma', 'IGC'),
'CI': ('University of Pittsburgh', 'Rectum adenocarcinoma', 'IGC'),
'CJ': ('MD Anderson Cancer Center', 'Kidney renal clear cell carcinoma', 'IGC'),
'CK': ('Harvard', 'Colon adenocarcinoma', 'IGC'),
'CL': ('Harvard', 'Rectum adenocarcinoma', 'IGC'),
'CM': ('MSKCC', 'Colon adenocarcinoma', 'IGC'),
'CN': ('University of Pittsburgh', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CQ': ('University Health Network, Toronto', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CR': ('Vanderbilt University', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CS': ('Thomas Jefferson University', 'Brain Lower Grade Glioma', 'IGC'),
'CU': ('UNC', 'Bladder Urothelial Carcinoma', 'NCH'),
'CV': ('MD Anderson Cancer Center', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CW': ('Mayo Clinic - Rochester', 'Kidney renal clear cell carcinoma', 'IGC'),
'CX': ('Medical College of Georgia', 'Head and Neck squamous cell carcinoma', 'IGC'),
'CZ': ('Harvard', 'Kidney renal clear cell carcinoma', 'IGC'),
'D1': ('Mayo Clinic', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'D3': ('MD Anderson', 'Skin Cutaneous Melanoma', 'NCH'),
'D5': ('Greater Poland Cancer Center', 'Colon adenocarcinoma', 'IGC'),
'D6': ('Greater Poland Cancer Center', 'Head and Neck squamous cell carcinoma', 'IGC'),
'D7': ('Greater Poland Cancer Center', 'Stomach adenocarcinoma', 'IGC'),
'D8': ('Greater Poland Cancer Center', 'Breast invasive carcinoma', 'NCH'),
'D9': ('Greater Poland Cancer Center', 'Skin Cutaneous Melanoma', 'NCH'),
'DA': ('Yale', 'Skin Cutaneous Melanoma', 'NCH'),
'DB': ('Mayo Clinic - Rochester', 'Brain Lower Grade Glioma', 'IGC'),
'DC': ('MSKCC', 'Rectum adenocarcinoma', 'IGC'),
'DD': ('Mayo Clinic - Rochester', 'Liver hepatocellular carcinoma', 'NCH'),
'DE': ('University of North Carolina', 'Thyroid carcinoma', 'NCH'),
'DF': ('Ontario Institute for Cancer Research', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'DG': ('Ontario Institute for Cancer Research', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'DH': ('University of Florida', 'Brain Lower Grade Glioma', 'IGC'),
'DI': ('MD Anderson', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'DJ': ('Memorial Sloan Kettering', 'Thyroid carcinoma', 'NCH'),
'DK': ('Memorial Sloan Kettering', 'Bladder Urothelial Carcinoma', 'NCH'),
'DM': ('University Of Michigan', 'Colon adenocarcinoma', 'NCH'),
'DO': ('Medical College of Georgia', 'Thyroid carcinoma', 'NCH'),
'DQ': ('University Of Michigan', 'Head and Neck squamous cell carcinoma', 'IGC'),
'DR': ('University of Hawaii', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'DS': ('Cedars Sinai', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'DT': ('ILSBio', 'Rectum adenocarcinoma', 'IGC'),
'DU': ('Henry Ford Hospital', 'Brain Lower Grade Glioma', 'IGC'),
'DV': ('NCI Urologic Oncology Branch', 'Kidney renal clear cell carcinoma', 'IGC'),
'DW': ('NCI Urologic Oncology Branch', 'Kidney renal papillary cell carcinoma', 'IGC'),
'DX': ('Memorial Sloan Kettering', 'Sarcoma', 'NCH'),
'DY': ('University Of Michigan', 'Rectum adenocarcinoma', 'NCH'),
'DZ': ('Mayo Clinic - Rochester', 'Kidney renal papillary cell carcinoma', 'IGC'),
'E1': ('Duke', 'Brain Lower Grade Glioma', 'IGC'),
'E2': ('Roswell Park', 'Breast invasive carcinoma', 'NCH'),
'E3': ('Roswell Park', 'Thyroid carcinoma', 'NCH'),
'E5': ('Roswell Park', 'Bladder Urothelial Carcinoma', 'NCH'),
'E6': ('Roswell Park', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'E7': ('Asterand', 'Bladder Urothelial Carcinoma', 'NCH'),
'E8': ('Asterand', 'Thyroid carcinoma', 'NCH'),
'E9': ('Asterand', 'Breast invasive carcinoma', 'NCH'),
'EA': ('Asterand', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'EB': ('Asterand', 'Skin Cutaneous Melanoma', 'NCH'),
'EC': ('Asterand', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'ED': ('Asterand', 'Liver hepatocellular carcinoma', 'NCH'),
'EE': ('University of Sydney', 'Skin Cutaneous Melanoma', 'NCH'),
'EF': ('Cureline', 'Rectum adenocarcinoma', 'IGC'),
'EI': ('Greater Poland Cancer Center', 'Rectum adenocarcinoma', 'IGC'),
'EJ': ('University of Pittsburgh', 'Prostate adenocarcinoma', 'IGC'),
'EK': ('Gynecologic Oncology Group', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'EL': ('MD Anderson', 'Thyroid carcinoma', 'NCH'),
'EM': ('University Health Network', 'Thyroid carcinoma', 'NCH'),
'EO': ('University Health Network', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'EP': ('Christiana Healthcare', 'Liver hepatocellular carcinoma', 'NCH'),
'EQ': ('Christiana Healthcare', 'Stomach adenocarcinoma', 'IGC'),
'ER': ('University of Pittsburgh', 'Skin Cutaneous Melanoma', 'NCH'),
'ES': ('University of Florida', 'Liver hepatocellular carcinoma', 'NCH'),
'ET': ('Johns Hopkins', 'Thyroid carcinoma', 'NCH'),
'EU': ('CHI-Penrose Colorado', 'Kidney renal clear cell carcinoma', 'IGC'),
'EV': ('CHI-Penrose Colorado', 'Kidney renal papillary cell carcinoma', 'IGC'),
'EW': ('University of Miami', 'Breast invasive carcinoma', 'NCH'),
'EX': ('University of North Carolina', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'EY': ('University of North Carolina', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'EZ': ('UNC', 'Brain Lower Grade Glioma', 'IGC'),
'F1': ('UNC', 'Stomach adenocarcinoma', 'IGC'),
'F2': ('UNC', 'Pancreatic adenocarcinoma', 'IGC'),
'F4': ('Asterand', 'Colon adenocarcinoma', 'IGC'),
'F5': ('Asterand', 'Rectum adenocarcinoma', 'IGC'),
'F6': ('Asterand', 'Brain Lower Grade Glioma', 'IGC'),
'F7': ('Asterand', 'Head and Neck squamous cell carcinoma', 'IGC'),
'F9': ('Asterand', 'Kidney renal papillary cell carcinoma', 'IGC'),
'FA': ('Asterand', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'FB': ('Asterand', 'Pancreatic adenocarcinoma', 'IGC'),
'FC': ('Asterand', 'Prostate adenocarcinoma', 'IGC'),
'FD': ('BLN - University Of Chicago', 'Bladder Urothelial Carcinoma', 'NCH'),
'FE': ('Ohio State University', 'Thyroid carcinoma', 'NCH'),
'FF': ('SingHealth', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'FG': ('Case Western', 'Brain Lower Grade Glioma', 'IGC'),
'FH': ('CHI-Penrose Colorado', 'Thyroid carcinoma', 'NCH'),
'FI': ('Washington University', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'FJ': ('BLN - Baylor', 'Bladder Urothelial Carcinoma', 'NCH'),
'FK': ('Johns Hopkins', 'Thyroid carcinoma', 'NCH'),
'FL': ('University of Hawaii - Normal Study', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'FM': ('International Genomics Consortium', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'FN': ('International Genomics Consortium', 'Brain Lower Grade Glioma', 'IGC'),
'FP': ('International Genomics Consortium', 'Stomach adenocarcinoma', 'IGC'),
'FQ': ('Johns Hopkins', 'Pancreatic adenocarcinoma', 'IGC'),
'FR': ('University of North Carolina', 'Skin Cutaneous Melanoma', 'NCH'),
'FS': ('Essen', 'Skin Cutaneous Melanoma', 'NCH'),
'FT': ('BLN - University of Miami', 'Bladder Urothelial Carcinoma', 'NCH'),
'FU': ('International Genomics Consortium', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'FV': ('International Genomics Consortium', 'Liver hepatocellular carcinoma', 'NCH'),
'FW': ('International Genomics Consortium', 'Skin Cutaneous Melanoma', 'NCH'),
'FX': ('International Genomics Consortium', 'Sarcoma', 'NCH'),
'FY': ('International Genomics Consortium', 'Thyroid carcinoma', 'NCH'),
'FZ': ('University of Pittsburgh', 'Pancreatic adenocarcinoma', 'IGC'),
'G2': ('MD Anderson', 'Bladder Urothelial Carcinoma', 'NCH'),
'G3': ('Alberta Health Services', 'Liver hepatocellular carcinoma', 'NCH'),
'G4': ('Roswell Park', 'Colon adenocarcinoma', 'IGC'),
'G5': ('Roswell Park', 'Rectum adenocarcinoma', 'IGC'),
'G6': ('Roswell Park', 'Kidney renal clear cell carcinoma', 'IGC'),
'G7': ('Roswell Park', 'Kidney renal papillary cell carcinoma', 'IGC'),
'G8': ('Roswell Park', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'G9': ('Roswell Park', 'Prostate adenocarcinoma', 'IGC'),
'GC': ('International Genomics Consortium', 'Bladder Urothelial Carcinoma', 'NCH'),
'GD': ('ABS - IUPUI', 'Bladder Urothelial Carcinoma', 'NCH'),
'GE': ('ABS - IUPUI', 'Thyroid carcinoma', 'NCH'),
'GF': ('ABS - IUPUI', 'Skin Cutaneous Melanoma', 'NCH'),
'GG': ('ABS - IUPUI', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'GH': ('ABS - IUPUI', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'GI': ('ABS - IUPUI', 'Breast invasive carcinoma', 'NCH'),
'GJ': ('ABS - IUPUI', 'Liver hepatocellular carcinoma', 'NCH'),
'GK': ('ABS - IUPUI', 'Kidney renal clear cell carcinoma', 'IGC'),
'GL': ('ABS - IUPUI', 'Kidney renal papillary cell carcinoma', 'IGC'),
'GM': ('MD Anderson', 'Breast invasive carcinoma', 'NCH'),
'GN': ('Roswell', 'Skin Cutaneous Melanoma', 'NCH'),
'GP': ('MD Anderson', 'Acute Myeloid Leukemia', 'NCH'),
'GR': ('University of Nebraska Medical Center (UNMC)', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'GS': ('Fundacio Clinic per a la Recerca Biomedica', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'GU': ('BLN - UT Southwestern Medical Center at Dallas', 'Bladder Urothelial Carcinoma', 'NCH'),
'GV': ('BLN - Cleveland Clinic', 'Bladder Urothelial Carcinoma', 'NCH'),
'GZ': ('BC Cancer Agency', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'H1': ('Medical College of Georgia', 'Stomach adenocarcinoma', 'IGC'),
'H2': ('Christiana Healthcare', 'Thyroid carcinoma', 'NCH'),
'H3': ('ABS - IUPUI', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'IGC'),
'H4': ('Medical College of Georgia', 'Bladder Urothelial Carcinoma', 'NCH'),
'H5': ('Medical College of Georgia', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'H6': ('Christiana Healthcare', 'Pancreatic adenocarcinoma', 'IGC'),
'H7': ('ABS - IUPUI', 'Head and Neck squamous cell carcinoma', 'IGC'),
'H8': ('ABS - IUPUI', 'Pancreatic adenocarcinoma', 'IGC'),
'H9': ('ABS - IUPUI', 'Prostate adenocarcinoma', 'IGC'),
'HA': ('Alberta Health Services', 'Stomach adenocarcinoma', 'IGC'),
'HB': ('University of North Carolina', 'Sarcoma', 'NCH'),
'HC': ('International Genomics Consortium', 'Prostate adenocarcinoma', 'IGC'),
'HD': ('International Genomics Consortium', 'Head and Neck squamous cell carcinoma', 'IGC'),
'HE': ('Ontario Institute for Cancer Research (OICR)', 'Kidney renal papillary cell carcinoma', 'IGC'),
'HF': ('Ontario Institute for Cancer Research (OICR)', 'Stomach adenocarcinoma', 'IGC'),
'HG': ('Roswell Park', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'HH': ('Fox Chase', 'Stomach adenocarcinoma', 'IGC'),
'HI': ('Fox Chase', 'Prostate adenocarcinoma', 'IGC'),
'HJ': ('Fox Chase', 'Stomach adenocarcinoma', 'IGC'),
'HK': ('Fox Chase', 'Brain Lower Grade Glioma', 'IGC'),
'HL': ('Fox Chase', 'Head and Neck squamous cell carcinoma', 'IGC'),
'HM': ('Christiana Healthcare', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'HN': ('Ontario Institute for Cancer Research (OICR)', 'Breast invasive carcinoma', 'NCH'),
'HP': ('Ontario Institute for Cancer Research (OICR)', 'Liver hepatocellular carcinoma', 'NCH'),
'HQ': ('Ontario Institute for Cancer Research (OICR)', 'Bladder Urothelial Carcinoma', 'NCH'),
'HR': ('Ontario Institute for Cancer Research (OICR)', 'Skin Cutaneous Melanoma', 'NCH'),
'HS': ('Ontario Institute for Cancer Research (OICR)', 'Sarcoma', 'NCH'),
'HT': ('Case Western - St Joes', 'Brain Lower Grade Glioma', 'IGC'),
'HU': ('National Cancer Center Korea', 'Stomach adenocarcinoma', 'IGC'),
'HV': ('National Cancer Center Korea', 'Pancreatic adenocarcinoma', 'IGC'),
'HW': ('MSKCC', 'Brain Lower Grade Glioma', 'IGC'),
'HZ': ('International Genomics Consortium', 'Pancreatic adenocarcinoma', 'IGC'),
'IA': ('Cleveland Clinic', 'Kidney renal papillary cell carcinoma', 'IGC'),
'IB': ('Alberta Health Services', 'Pancreatic adenocarcinoma', 'IGC'),
'IC': ('University of Pittsburgh', 'Esophageal carcinoma ', 'NCH'),
'IE': ('ABS - IUPUI', 'Sarcoma', 'NCH'),
'IF': ('University of Texas MD Anderson Cancer Center', 'Sarcoma', 'NCH'),
'IG': ('Asterand', 'Esophageal carcinoma ', 'NCH'),
'IH': ('University of Miami', 'Skin Cutaneous Melanoma', 'NCH'),
'IJ': ('Christiana Healthcare', 'Acute Myeloid Leukemia', 'NCH'),
'IK': ('Christiana Healthcare', 'Brain Lower Grade Glioma', 'IGC'),
'IM': ('University of Miami', 'Thyroid carcinoma', 'NCH'),
'IN': ('University of Pittsburgh', 'Stomach adenocarcinoma', 'IGC'),
'IP': ('ABS - IUPUI', 'Stomach adenocarcinoma', 'IGC'),
'IQ': ('University of Miami', 'Head and Neck squamous cell carcinoma', 'IGC'),
'IR': ('Memorial Sloan Kettering', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'IS': ('Memorial Sloan Kettering', 'Sarcoma', 'NCH'),
'IW': ('Cedars Sinai', 'Sarcoma', 'NCH'),
'IZ': ('ABS - Lahey Clinic', 'Kidney renal papillary cell carcinoma', 'IGC'),
'J1': ('ABS - Lahey Clinic', 'Lung squamous cell carcinoma', 'IGC'),
'J2': ('ABS - Lahey Clinic', 'Lung adenocarcinoma', 'IGC'),
'J4': ('ABS - Lahey Clinic', 'Prostate adenocarcinoma', 'IGC'),
'J7': ('ILSBio', 'Kidney renal papillary cell carcinoma', 'IGC'),
'J8': ('Mayo Clinic', 'Thyroid carcinoma', 'NCH'),
'J9': ('Melbourne Health', 'Prostate adenocarcinoma', 'IGC'),
'JA': ('ABS - Research Metrics Pakistan', 'Head and Neck squamous cell carcinoma', 'IGC'),
'JL': ('ABS - Research Metrics Pakistan', 'Breast invasive carcinoma', 'NCH'),
'JU': ('BLN - Baylor', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'JV': ('BLN - Baylor', 'Sarcoma', 'NCH'),
'JW': ('BLN - Baylor', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'JX': ('Washington University', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'JY': ('University Health Network', 'Esophageal carcinoma ', 'NCH'),
'JZ': ('University of Rochester', 'Esophageal carcinoma ', 'NCH'),
'K1': ('University of Pittsburgh', 'Sarcoma', 'NCH'),
'K4': ('ABS - Lahey Clinic', 'Bladder Urothelial Carcinoma', 'NCH'),
'K6': ('ABS - Lahey Clinic', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'K7': ('ABS - Lahey Clinic', 'Liver hepatocellular carcinoma', 'NCH'),
'K8': ('ABS - Lahey Clinic', 'Skin Cutaneous Melanoma', 'NCH'),
'KA': ('ABS - Lahey Clinic', 'Esophageal carcinoma ', 'NCH'),
'KB': ('University Health Network, Toronto', 'Stomach adenocarcinoma', 'IGC'),
'KC': ('Cornell Medical College', 'Prostate adenocarcinoma', 'IGC'),
'KD': ('Mount Sinai School of Medicine', 'Sarcoma', 'NCH'),
'KE': ('Mount Sinai School of Medicine', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'KF': ('Christiana Healthcare', 'Sarcoma', 'NCH'),
'KG': ('Baylor Network', 'Pancreatic adenocarcinoma', 'IGC'),
'KH': ('Memorial Sloan Kettering', 'Esophageal carcinoma ', 'NCH'),
'KJ': ('University of Miami', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'KK': ('MD Anderson Cancer Center', 'Prostate adenocarcinoma', 'IGC'),
'KL': ('MSKCC', 'Kidney Chromophobe', 'IGC'),
'KM': ('NCI Urologic Oncology Branch', 'Kidney Chromophobe', 'IGC'),
'KN': ('Harvard', 'Kidney Chromophobe', 'IGC'),
'KO': ('MD Anderson Cancer Center', 'Kidney Chromophobe', 'IGC'),
'KP': ('British Columbia Cancer Agency', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'KQ': ('Cornell Medical College', 'Bladder Urothelial Carcinoma', 'NCH'),
'KR': ('University Of Michigan', 'Liver hepatocellular carcinoma', 'NCH'),
'KS': ('University Of Michigan', 'Thyroid carcinoma', 'NCH'),
'KT': ('Hartford', 'Brain Lower Grade Glioma', 'IGC'),
'KU': ('Hartford', 'Head and Neck squamous cell carcinoma', 'IGC'),
'KV': ('Hartford', 'Kidney renal papillary cell carcinoma', 'IGC'),
'KZ': ('Hartford', 'Stomach adenocarcinoma', 'IGC'),
'L1': ('Hartford', 'Pancreatic adenocarcinoma', 'IGC'),
'L3': ('Gundersen Lutheran Health System', 'Lung squamous cell carcinoma', 'IGC'),
'L4': ('Gundersen Lutheran Health System', 'Lung adenocarcinoma', 'IGC'),
'L5': ('University of Michigan', 'Esophageal carcinoma ', 'NCH'),
'L6': ('National Institutes of Health', 'Thyroid carcinoma', 'NCH'),
'L7': ('Christiana Care', 'Esophageal carcinoma ', 'NCH'),
'L8': ('University of Miami', 'Kidney renal papillary cell carcinoma', 'NCH'),
'L9': ('Candler', 'Lung adenocarcinoma', 'IGC'),
'LA': ('Candler', 'Lung squamous cell carcinoma', 'IGC'),
'LB': ('Candler', 'Pancreatic adenocarcinoma', 'IGC'),
'LC': ('Hartford Hospital', 'Bladder Urothelial Carcinoma', 'NCH'),
'LD': ('Hartford Hospital', 'Breast invasive carcinoma', 'NCH'),
'LG': ('Hartford Hospital', 'Liver hepatocellular carcinoma', 'NCH'),
'LH': ('Hartford Hospital', 'Skin Cutaneous Melanoma', 'NCH'),
'LI': ('Hartford Hospital', 'Sarcoma', 'NCH'),
'LK': ('University of Pittsburgh', 'Mesothelioma', 'NCH'),
'LL': ('Candler', 'Breast invasive carcinoma', 'NCH'),
'LN': ('ILSBIO', 'Esophageal carcinoma ', 'NCH'),
'LP': ('ILSBIO', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'LQ': ('Gundersen Lutheran Health System', 'Breast invasive carcinoma', 'NCH'),
'LS': ('Gundersen Lutheran Health System', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'LT': ('Gundersen Lutheran Health System', 'Bladder Urothelial Carcinoma', 'NCH'),
'M7': ('University of North Carolina', 'Prostate adenocarcinoma', 'NCH'),
'M8': ('Ontario Institute for Cancer Research (OICR)', 'Pancreatic adenocarcinoma', 'NCH'),
'M9': ('Ontario Institute for Cancer Research (OICR)', 'Esophageal carcinoma ', 'NCH'),
'MA': ('MD Anderson Cancer Center', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'MB': ('University of Minnesota', 'Sarcoma', 'NCH'),
'ME': ('University of Minnesota', 'Lung adenocarcinoma', 'NCH'),
'MF': ('University of Minnesota', 'Lung squamous cell carcinoma', 'NCH'),
'MG': ('BLN - Baylor', 'Prostate adenocarcinoma', 'NCH'),
'MH': ('BLN - Baylor', 'Kidney renal papillary cell carcinoma', 'NCH'),
'MI': ('BLN - Baylor', 'Liver hepatocellular carcinoma', 'NCH'),
'MJ': ('BLN - Baylor', 'Sarcoma', 'NCH'),
'MK': ('BLN - Baylor', 'Thyroid carcinoma', 'NCH'),
'ML': ('BLN - Baylor', 'Lung squamous cell carcinoma', 'NCH'),
'MM': ('BLN - Baylor', 'Kidney renal clear cell carcinoma', 'NCH'),
'MN': ('BLN - Baylor', 'Lung adenocarcinoma', 'NCH'),
'MO': ('ILSBio', 'Sarcoma', 'NCH'),
'MP': ('Washington University - Mayo Clinic', 'Lung adenocarcinoma', 'NCH'),
'MQ': ('Washington University - NYU', 'Mesothelioma', 'NCH'),
'MR': ('University of Minnesota', 'Liver hepatocellular carcinoma', 'NCH'),
'MS': ('University of Minnesota', 'Breast invasive carcinoma', 'NCH'),
'MT': ('University of Minnesota', 'Head and Neck squamous cell carcinoma', 'NCH'),
'MU': ('University of Minnesota', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'MV': ('University of Minnesota', 'Bladder Urothelial Carcinoma', 'NCH'),
'MW': ('University of Miami', 'Kidney renal clear cell carcinoma', 'NCH'),
'MX': ('MSKCC', 'Stomach adenocarcinoma', 'NCH'),
'MY': ('Montefiore Medical Center', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'MZ': ('Montefiore Medical Center', 'Head and Neck squamous cell carcinoma', 'NCH'),
'N1': ('Montefiore Medical Center', 'Sarcoma', 'NCH'),
'N5': ('MSKCC', 'Uterine Carcinosarcoma', 'NCH'),
'N6': ('University of Pittsburgh', 'Uterine Carcinosarcoma', 'NCH'),
'N7': ('Washington University', 'Uterine Carcinosarcoma', 'NCH'),
'N8': ('University of North Carolina', 'Uterine Carcinosarcoma', 'NCH'),
'N9': ('MD Anderson', 'Uterine Carcinosarcoma', 'NCH'),
'NA': ('Duke University', 'Uterine Carcinosarcoma', 'NCH'),
'NB': ('Washington University - CHUV', 'Lung adenocarcinoma', 'NCH'),
'NC': ('Washington University - CHUV', 'Lung squamous cell carcinoma', 'NCH'),
'ND': ('Cedars Sinai', 'Uterine Carcinosarcoma', 'NCH'),
'NF': ('Mayo Clinic - Rochester', 'Uterine Carcinosarcoma', 'NCH'),
'NG': ('Roswell Park', 'Uterine Carcinosarcoma', 'NCH'),
'NH': ('Candler', 'Colon adenocarcinoma', 'NCH'),
'NI': ('Roswell Park', 'Liver hepatocellular carcinoma', 'NCH'),
'NJ': ('Washington University - Rush University', 'Lung adenocarcinoma', 'NCH'),
'NK': ('Washington University - Rush University', 'Lung squamous cell carcinoma', 'NCH'),
'NM': ('Cambridge BioSource', 'Head and Neck squamous cell carcinoma', 'NCH'),
'NP': ('International Genomics Consortium', 'Kidney Chromophobe', 'NCH'),
'NQ': ('International Genomics Consortium', 'Mesothelioma', 'NCH'),
'NS': ('Gundersen Lutheran Health System', 'Skin Cutaneous Melanoma', 'NCH'),
'O1': ('Washington University - CALGB', 'Lung adenocarcinoma', 'NCH'),
'O2': ('Washington University - CALGB', 'Lung squamous cell carcinoma', 'NCH'),
'O8': ('Saint Mary\'s Health Care', 'Liver hepatocellular carcinoma', 'NCH'),
'O9': ('Saint Mary\'s Health Care', 'Kidney renal papillary cell carcinoma', 'NCH'),
'OC': ('Saint Mary\'s Health Care', 'Lung squamous cell carcinoma', 'NCH'),
'OD': ('Saint Mary\'s Health Care', 'Skin Cutaneous Melanoma', 'NCH'),
'OE': ('Saint Mary\'s Health Care', 'Pancreatic adenocarcinoma', 'NCH'),
'OJ': ('Saint Mary\'s Health Care', 'Thyroid carcinoma', 'NCH'),
'OK': ('Mount Sinai School of Medicine', 'Breast invasive carcinoma', 'NCH'),
'OL': ('University of Chicago', 'Breast invasive carcinoma', 'NCH'),
'OR': ('University of Michigan', 'Adrenocortical carcinoma', 'NCH'),
'OU': ('Roswell Park', 'Adrenocortical carcinoma', 'NCH'),
'OW': ('International Genomics Consortium', 'Miscellaneous', 'NCH'),
'OX': ('University of North Carolina', 'Glioblastoma multiforme', 'NCH'),
'OY': ('University of North Carolina', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'P3': ('Fred Hutchinson', 'Head and Neck squamous cell carcinoma', 'NCH'),
'P4': ('MD Anderson Cancer Center', 'Kidney renal papillary cell carcinoma', 'NCH'),
'P5': ('Cureline', 'Brain Lower Grade Glioma', 'NCH'),
'P6': ('Translational Genomics Research Institute', 'Adrenocortical carcinoma', 'NCH'),
'P7': ('Translational Genomics Research Institute', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'P8': ('University of Pittsburgh', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'P9': ('University of Minnesota', 'Pancreatic adenocarcinoma', 'NCH'),
'PA': ('University of Minnesota', 'Adrenocortical carcinoma', 'NCH'),
'PB': ('University of Minnesota', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'NCH'),
'PC': ('Fox Chase', 'Sarcoma', 'NCH'),
'PD': ('Fox Chase', 'Liver hepatocellular carcinoma', 'NCH'),
'PE': ('Fox Chase', 'Breast invasive carcinoma', 'NCH'),
'PG': ('Montefiore Medical Center', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'PH': ('Gundersen Lutheran', 'Acute Myeloid Leukemia', 'NCH'),
'PJ': ('Gundersen Lutheran', 'Kidney renal papillary cell carcinoma', 'NCH'),
'PK': ('University Health Network', 'Adrenocortical carcinoma', 'NCH'),
'PL': ('Institute of Human Virology Nigeria', 'Breast invasive carcinoma', 'NCH'),
'PN': ('Institute of Human Virology Nigeria', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'PQ': ('University of Colorado Denver', 'Bladder Urothelial Carcinoma', 'NCH'),
'PR': ('Roswell Park', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'PT': ('Maine Medical Center', 'Sarcoma', 'NCH'),
'PZ': ('ABS - Lahey Clinic', 'Pancreatic adenocarcinoma', 'NCH'),
'Q1': ('University of Oklahoma HSC', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'Q2': ('University of Oklahoma HSC', 'Kidney renal papillary cell carcinoma', 'NCH'),
'Q3': ('University of Oklahoma HSC', 'Pancreatic adenocarcinoma', 'NCH'),
'Q4': ('Emory University', 'Acute Myeloid Leukemia', 'NCH'),
'Q9': ('Emory University', 'Esophageal carcinoma ', 'NCH'),
'QA': ('Emory University', 'Liver hepatocellular carcinoma', 'NCH'),
'QB': ('Emory University', 'Skin Cutaneous Melanoma', 'NCH'),
'QC': ('Emory University', 'Sarcoma', 'NCH'),
'QD': ('Emory University', 'Thyroid carcinoma', 'NCH'),
'QF': ('BLN - Baylor', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'QG': ('BLN - Baylor', 'Colon adenocarcinoma', 'NCH'),
'QH': ('Fondazione-Besta', 'Brain Lower Grade Glioma', 'NCH'),
'QJ': ('Mount Sinai School of Medicine', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'QK': ('Emory University - Winship Cancer Inst.', 'Head and Neck squamous cell carcinoma', 'NCH'),
'QL': ('University of Chicago', 'Colon adenocarcinoma', 'NCH'),
'QM': ('University of Oklahoma HSC', 'Uterine Carcinosarcoma', 'NCH'),
'QN': ('ILSBio', 'Uterine Carcinosarcoma', 'NCH'),
'QQ': ('Roswell Park', 'Sarcoma', 'NCH'),
'QR': ('National Institutes of Health', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'QS': ('Candler', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'QT': ('University of North Carolina', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'QU': ('Harvard Beth Israel', 'Prostate adenocarcinoma', 'NCH'),
'QV': ('Instituto Nacional de Cancerologia', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'QW': ('Instituto Nacional de Cancerologia', 'Stomach adenocarcinoma', 'NCH'),
'R1': ('CHI-Penrose Colorado', 'Colon adenocarcinoma', 'NCH'),
'R2': ('CHI-Penrose Colorado', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'R3': ('CHI-Penrose Colorado', 'Bladder Urothelial Carcinoma', 'NCH'),
'R5': ('MD Anderson Cancer Center', 'Stomach adenocarcinoma', 'NCH'),
'R6': ('MD Anderson Cancer Center', 'Esophageal carcinoma ', 'NCH'),
'R7': ('Gundersen Lutheran Health System', 'Head and Neck squamous cell carcinoma', 'NCH'),
'R8': ('MD Anderson', 'Brain Lower Grade Glioma', 'NCH'),
'R9': ('Candler', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'RA': ('Candler', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'RB': ('Emory University', 'Pancreatic adenocarcinoma', 'NCH'),
'RC': ('University of Utah', 'Liver hepatocellular carcinoma', 'NCH'),
'RD': ('Peter MacCallum Cancer Center', 'Stomach adenocarcinoma', 'NCH'),
'RE': ('Peter MacCallum Cancer Center', 'Esophageal carcinoma ', 'NCH'),
'RG': ('Montefiore Medical Center', 'Liver hepatocellular carcinoma', 'NCH'),
'RH': ('BLN - Baylor', 'Head and Neck squamous cell carcinoma', 'NCH'),
'RL': ('St. Joseph\'s Hospital AZ', 'Pancreatic adenocarcinoma', 'NCH'),
'RM': ('St. Joseph\'s Hospital AZ', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'RN': ('St. Joseph\'s Hospital AZ', 'Sarcoma', 'NCH'),
'RP': ('St. Joseph\'s Hospital AZ', 'Skin Cutaneous Melanoma', 'NCH'),
'RQ': ('St. Joseph\'s Hospital AZ', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'NCH'),
'RR': ('St. Joseph\'s Hospital AZ', 'Glioblastoma multiforme', 'NCH'),
'RS': ('Memorial Sloan Kettering Cancer Center', 'Head and Neck squamous cell carcinoma', 'NCH'),
'RT': ('Cleveland Clinic Foundation', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'RU': ('Northwestern University', 'Colon adenocarcinoma', 'NCH'),
'RV': ('Northwestern University', 'Pancreatic adenocarcinoma', 'NCH'),
'RW': ('Michigan University', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'RX': ('University of Minnesota', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'RY': ('University of California San Francisco', 'Brain Lower Grade Glioma', 'NCH'),
'RZ': ('Wills Eye Institute', 'Uveal Melanoma', 'NCH'),
'S2': ('Albert Einstein Medical Center', 'Lung adenocarcinoma', 'NCH'),
'S3': ('Albert Einstein Medical Center', 'Breast invasive carcinoma', 'NCH'),
'S4': ('University of Chicago', 'Pancreatic adenocarcinoma', 'NCH'),
'S5': ('University of Oklahoma HSC', 'Bladder Urothelial Carcinoma', 'NCH'),
'S6': ('Gundersen Lutheran Health System', 'Testicular Germ Cell Tumors', 'NCH'),
'S7': ('University Hospital Motol', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'S8': ('ABS - IUPUI', 'Esophageal carcinoma ', 'NCH'),
'S9': ('Dept of Neurosurgery at University of Heidelberg', 'Brain Lower Grade Glioma', 'NCH'),
'SA': ('ABS - IUPUI', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SB': ('Baylor College of Medicine', 'Testicular Germ Cell Tumors', 'NCH'),
'SC': ('Memorial Sloan Kettering', 'Mesothelioma', 'NCH'),
'SD': ('MD Anderson', 'Pancreatic adenocarcinoma', 'NCH'),
'SE': ('Boston Medical Center', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SG': ('Cleveland Clinic Foundation', 'Sarcoma', 'NCH'),
'SH': ('Papworth Hospital', 'Mesothelioma', 'NCH'),
'SI': ('Washington University St. Louis', 'Sarcoma', 'NCH'),
'SJ': ('Albert Einstein Medical Center', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'SK': ('St. Joseph\'s Hospital AZ', 'Colon adenocarcinoma', 'NCH'),
'SL': ('St. Joseph\'s Hospital AZ', 'Uterine Corpus Endometrial Carcinoma', 'NCH'),
'SN': ('BLN - Baylor', 'Testicular Germ Cell Tumors', 'NCH'),
'SO': ('University of Minnesota', 'Testicular Germ Cell Tumors', 'NCH'),
'SP': ('University Health Network', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SQ': ('International Genomics Consortium', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SR': ('Tufts Medical Center', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'SS': ('Medical College of Georgia', 'Colon adenocarcinoma', 'NCH'),
'ST': ('Global Bioclinical-Moldova', 'Head and Neck squamous cell carcinoma', 'NCH'),
'SU': ('Global Bioclinical-Moldova', 'Prostate adenocarcinoma', 'NCH'),
'SW': ('Global Bioclinical-Moldova', 'Stomach adenocarcinoma', 'NCH'),
'SX': ('Mayo Clinic Arizona', 'Kidney renal papillary cell carcinoma', 'NCH'),
'SY': ('Mayo Clinic Arizona', 'Bladder Urothelial Carcinoma', 'NCH'),
'T1': ('St. Joseph\'s Hospital Arizona', 'Liver hepatocellular carcinoma', 'NCH'),
'T2': ('St. University of Colorado Denver', 'Head and Neck squamous cell carcinoma', 'NCH'),
'T3': ('Molecular Response', 'Head and Neck squamous cell carcinoma', 'NCH'),
'T6': ('Molecular Response', 'Lung adenocarcinoma', 'NCH'),
'T7': ('Molecular Response', 'Kidney renal clear cell carcinoma', 'NCH'),
'T9': ('Molecular Response', 'Colon adenocarcinoma', 'NCH'),
'TE': ('Global BioClinical - Georgia', 'Skin Cutaneous Melanoma', 'NCH'),
'TG': ('Global BioClinical - Georgia', 'Head and Neck squamous cell carcinoma', 'NCH'),
'TK': ('Global BioClinical - Georgia', 'Prostate adenocarcinoma', 'NCH'),
'TL': ('Global BioClinical - Georgia', 'Stomach adenocarcinoma', 'NCH'),
'TM': ('The University of New South Wales', 'Brain Lower Grade Glioma', 'NCH'),
'TN': ('Ohio State University', 'Head and Neck squamous cell carcinoma', 'NCH'),
'TP': ('Maine Medical Center', 'Prostate adenocarcinoma', 'NCH'),
'TQ': ('University of Sao Paulo', 'Brain Lower Grade Glioma', 'NCH'),
'TR': ('Global Bioclinical-Moldova', 'Skin Cutaneous Melanoma', 'NCH'),
'TS': ('University of Pennsylvania', 'Mesothelioma', 'NCH'),
'TT': ('University of Pennsylvania', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'TV': ('Wake Forest University', 'Breast invasive carcinoma', 'NCH'),
'UB': ('UCSF', 'Liver hepatocellular carcinoma', 'NCH'),
'UC': ('University of Washington', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'UD': ('University of Western Australia', 'Mesothelioma', 'NCH'),
'UE': ('Asterand', 'Sarcoma', 'NCH'),
'UF': ('Barretos Cancer Hospital', 'Head and Neck squamous cell carcinoma', 'NCH'),
'UJ': ('Boston Medical Center', 'Lung squamous cell carcinoma', 'NCH'),
'UL': ('Boston Medical Center', 'Breast invasive carcinoma', 'NCH'),
'UN': ('Boston Medical Center', 'Kidney renal papillary cell carcinoma', 'NCH'),
'UP': ('Boston Medical Center', 'Head and Neck squamous cell carcinoma', 'NCH'),
'UR': ('Boston Medical Center', 'Prostate adenocarcinoma', 'NCH'),
'US': ('Garvan Institute of Medical Research', 'Pancreatic adenocarcinoma', 'NCH'),
'UT': ('Asbestos Diseases Research Institute', 'Mesothelioma', 'NCH'),
'UU': ('Mary Bird Perkins Cancer Center - Our Lady of the Lake', 'Breast invasive carcinoma', 'NCH'),
'UV': ('Capital Biosciences', 'Liver hepatocellular carcinoma', 'NCH'),
'UW': ('University of North Carolina', 'Kidney Chromophobe', 'NCH'),
'UY': ('University of California San Francisco', 'Bladder Urothelial Carcinoma', 'NCH'),
'UZ': ('University of California San Francisco', 'Kidney renal papillary cell carcinoma', 'NCH'),
'V1': ('University of California San Francisco', 'Prostate adenocarcinoma', 'NCH'),
'V2': ('Cleveland Clinic Foundation', 'Prostate adenocarcinoma', 'NCH'),
'V3': ('Cleveland Clinic Foundation', 'Uveal Melanoma', 'NCH'),
'V4': ('Institut Curie', 'Uveal Melanoma', 'NCH'),
'V5': ('Duke University', 'Esophageal carcinoma ', 'NCH'),
'V6': ('Duke University', 'Stomach adenocarcinoma', 'NCH'),
'V7': ('Medical College of Georgia', 'Breast invasive carcinoma', 'NCH'),
'V8': ('Medical College of Georgia', 'Kidney renal clear cell carcinoma', 'NCH'),
'V9': ('Medical College of Georgia', 'Kidney renal papillary cell carcinoma', 'NCH'),
'VA': ('Alliance', 'Stomach adenocarcinoma', 'NCH'),
'VB': ('Global BioClinical - Georgia', 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma', 'NCH'),
'VD': ('University of Liverpool', 'Uveal Melanoma', 'NCH'),
'VF': ('University of Pennsylvania', 'Testicular Germ Cell Tumors', 'NCH'),
'VG': ('Institute of Human Virology Nigeria', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'VK': ('Institute of Human Virology Nigeria', 'Colon adenocarcinoma', 'NCH'),
'VL': ('Institute of Human Virology Nigeria', 'Rectum adenocarcinoma', 'NCH'),
'VM': ('Huntsman Cancer Institute', 'Brain Lower Grade Glioma', 'NCH'),
'VN': ('NCI Urologic Oncology Branch', 'Prostate adenocarcinoma', 'NCH'),
'VP': ('Washington University', 'Prostate adenocarcinoma', 'NCH'),
'VQ': ('Barretos Cancer Hospital', 'Stomach adenocarcinoma', 'NCH'),
'VR': ('Barretos Cancer Hospital', 'Esophageal carcinoma ', 'NCH'),
'VS': ('Barretos Cancer Hospital', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'VT': ('Vanderbilt', 'Sarcoma', 'NCH'),
'VV': ('John Wayne Cancer Center', 'Brain Lower Grade Glioma', 'NCH'),
'VW': ('Northwestern University', 'Brain Lower Grade Glioma', 'NCH'),
'VX': ('Northwestern University', 'Stomach adenocarcinoma', 'NCH'),
'VZ': ('Albert Einstein Medical Center', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'W2': ('Medical College of Wisconsin', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'W3': ('John Wayne Cancer Center', 'Skin Cutaneous Melanoma', 'NCH'),
'W4': ('University of North Carolina', 'Testicular Germ Cell Tumors', 'NCH'),
'W5': ('Mayo Clinic Rochester', 'Cholangiocarcinoma', 'NCH'),
'W6': ('UCSF', 'Cholangiocarcinoma', 'NCH'),
'W7': ('Garvan Institute of Medical Research', 'Cholangiocarcinoma', 'NCH'),
'W8': ('Greenville Health System', 'Breast invasive carcinoma', 'NCH'),
'W9': ('University of Kansas', 'Brain Lower Grade Glioma', 'NCH'),
'WA': ('University of Schleswig-Holstein', 'Head and Neck squamous cell carcinoma', 'NCH'),
'WB': ('Erasmus MC', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'WC': ('MD Anderson', 'Uveal Melanoma', 'NCH'),
'WD': ('Emory University', 'Cholangiocarcinoma', 'NCH'),
'WE': ('Norfolk and Norwich Hospital', 'Skin Cutaneous Melanoma', 'NCH'),
'WF': ('Greenville Health System', 'Pancreatic adenocarcinoma', 'NCH'),
'WG': ('Greenville Health System', 'Lung squamous cell carcinoma', 'NCH'),
'WH': ('Greenville Health System', 'Brain Lower Grade Glioma', 'NCH'),
'WJ': ('Greenville Health System', 'Liver hepatocellular carcinoma', 'NCH'),
'WK': ('Brigham and Women\'s Hospital', 'Sarcoma', 'NCH'),
'WL': ('University of Kansas', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'WM': ('University of Kansas', 'Kidney renal clear cell carcinoma', 'NCH'),
'WN': ('University of Kansas', 'Kidney renal papillary cell carcinoma', 'NCH'),
'WP': ('University of Kansas', 'Sarcoma', 'NCH'),
'WQ': ('University of Kansas', 'Liver hepatocellular carcinoma', 'NCH'),
'WR': ('University of Kansas', 'Ovarian serous cystadenocarcinoma', 'NCH'),
'WS': ('University of Kansas', 'Colon adenocarcinoma', 'NCH'),
'WT': ('University of Kansas', 'Breast invasive carcinoma', 'NCH'),
'WU': ('Wake Forest University', 'Colon adenocarcinoma', 'NCH'),
'WW': ('Wake Forest University', 'Prostate adenocarcinoma', 'NCH'),
'WX': ('Yale University', 'Liver hepatocellular carcinoma', 'NCH'),
'WY': ('Johns Hopkins', 'Brain Lower Grade Glioma', 'NCH'),
'WZ': ('International Genomics Consortium', 'Testicular Germ Cell Tumors', 'NCH'),
'X2': ('University of Washington', 'Sarcoma', 'NCH'),
'X3': ('Cleveland Clinic Foundation', 'Testicular Germ Cell Tumors', 'NCH'),
'X4': ('Institute for Medical Research', 'Prostate adenocarcinoma', 'NCH'),
'X5': ('Institute of Human Virology Nigeria', 'Bladder Urothelial Carcinoma', 'NCH'),
'X6': ('University of Iowa', 'Sarcoma', 'NCH'),
'X7': ('ABS IUPUI', 'Thymoma', 'NCH'),
'X8': ('St. Joseph\'s Hospital Arizona', 'Esophageal carcinoma ', 'NCH'),
'X9': ('University of California, Davis', 'Sarcoma', 'NCH'),
'XA': ('University of Minnesota', 'Prostate adenocarcinoma', 'NCH'),
'XB': ('Albert Einstein Medical Center', 'Esophageal carcinoma ', 'NCH'),
'XC': ('Albert Einstein Medical Center', 'Lung squamous cell carcinoma', 'NCH'),
'XD': ('Providence Portland Medical Center', 'Pancreatic adenocarcinoma', 'NCH'),
'XE': ('University of Southern California', 'Testicular Germ Cell Tumors', 'NCH'),
'XF': ('University of Southern California', 'Bladder Urothelial Carcinoma', 'NCH'),
'XG': ('BLN UT Southwestern Medical Center at Dallas', 'Pheochromocytoma and Paraganglioma', 'NCH'),
'XH': ('BLN Baylor', 'Thymoma', 'NCH'),
'XJ': ('University of Kansas', 'Prostate adenocarcinoma', 'NCH'),
'XK': ('Mayo Clinic Arizona', 'Prostate adenocarcinoma', 'NCH'),
'XM': ('MSKCC', 'Thymoma', 'NCH'),
'XN': ('University of Sao Paulo', 'Pancreatic adenocarcinoma', 'NCH'),
'XP': ('University of Sao Paulo', 'Esophageal carcinoma ', 'NCH'),
'XQ': ('University of Sao Paulo', 'Prostate adenocarcinoma', 'NCH'),
'XR': ('University of Sao Paulo', 'Liver hepatocellular carcinoma', 'NCH'),
'XS': ('University of Sao Paulo', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'XT': ('Johns Hopkins', 'Mesothelioma', 'NCH'),
'XU': ('University Health Network', 'Thymoma', 'NCH'),
'XV': ('Capital Biosciences', 'Skin Cutaneous Melanoma', 'NCH'),
'XX': ('Spectrum Health', 'Breast invasive carcinoma', 'NCH'),
'XY': ('Spectrum Health', 'Testicular Germ Cell Tumors', 'NCH'),
'Y3': ('University of New Mexico', 'Acute Myeloid Leukemia', 'NCH'),
'Y5': ('University of Arizona', 'Sarcoma', 'NCH'),
'Y6': ('University of Arizona', 'Prostate adenocarcinoma', 'NCH'),
'Y8': ('Spectrum Health', 'Kidney renal papillary cell carcinoma', 'NCH'),
'YA': ('Spectrum Health', 'Liver hepatocellular carcinoma', 'NCH'),
'YB': ('Spectrum Health', 'Pancreatic adenocarcinoma', 'NCH'),
'YC': ('Spectrum Health', 'Bladder Urothelial Carcinoma', 'NCH'),
'YD': ('Spectrum Health', 'Skin Cutaneous Melanoma', 'NCH'),
'YF': ('University of Puerto Rico', 'Bladder Urothelial Carcinoma', 'NCH'),
'YG': ('University of Puerto Rico', 'Skin Cutaneous Melanoma', 'NCH'),
'YH': ('Stanford University', 'Pancreatic adenocarcinoma', 'NCH'),
'YJ': ('Stanford University', 'Prostate adenocarcinoma', 'NCH'),
'YL': ('PROCURE Biobank', 'Prostate adenocarcinoma', 'NCH'),
'YN': ('University of Arizona', 'Skin Cutaneous Melanoma', 'NCH'),
'YR': ('Barretos Cancer Hospital', 'Cholangiocarcinoma', 'NCH'),
'YS': ('Barretos Cancer Hospital', 'Mesothelioma', 'NCH'),
'YT': ('Barretos Cancer Hospital', 'Thymoma', 'NCH'),
'YU': ('Barretos Cancer Hospital', 'Testicular Germ Cell Tumors', 'NCH'),
'YV': ('MSKCC', 'Uveal Melanoma', 'NCH'),
'YW': ('Albert Einstein Medical Center', 'Sarcoma', 'NCH'),
'YX': ('Emory University', 'Stomach adenocarcinoma', 'NCH'),
'YY': ('Roswell Park', 'Pancreatic adenocarcinoma', 'NCH'),
'YZ': ('The Ohio State University', 'Uveal Melanoma', 'NCH'),
'Z2': ('IDI-IRCCS', 'Skin Cutaneous Melanoma', 'NCH'),
'Z3': ('UCLA', 'Sarcoma', 'NCH'),
'Z4': ('Cureline', 'Sarcoma', 'NCH'),
'Z5': ('Cureline', 'Pancreatic adenocarcinoma', 'NCH'),
'Z6': ('Cureline', 'Esophageal carcinoma ', 'NCH'),
'Z7': ('John Wayne Cancer Center', 'Breast invasive carcinoma', 'NCH'),
'Z8': ('John Wayne Cancer Center', 'Pancreatic adenocarcinoma', 'NCH'),
'ZA': ('Candler', 'Stomach adenocarcinoma', 'NCH'),
'ZB': ('Thoraxklinik', 'Thymoma', 'NCH'),
'ZC': ('University of Mannheim', 'Thymoma', 'NCH'),
'ZD': ('ILSbio', 'Cholangiocarcinoma', 'NCH'),
'ZE': ('Spectrum Health', 'Lung squamous cell carcinoma', 'NCH'),
'ZF': ('University of Sheffield', 'Bladder Urothelial Carcinoma', 'NCH'),
'ZG': ('University Medical Center Hamburg-Eppendorf', 'Prostate adenocarcinoma', 'NCH'),
'ZH': ('University of North Carolina', 'Cholangiocarcinoma', 'NCH'),
'ZJ': ('NCI HRE Branch', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
'ZK': ('University of New Mexico', 'Cholangiocarcinoma', 'NCH'),
'ZL': ('Valley Hospital', 'Thymoma', 'NCH'),
'ZM': ('University of Ulm', 'Testicular Germ Cell Tumors', 'NCH'),
'ZN': ('Brigham and Women\'s Hospital Division of Thoracic Surgery', 'Mesothelioma', 'NCH'),
'ZP': ('Medical College of Wisconsin', 'Liver hepatocellular carcinoma', 'NCH'),
'ZQ': ('Tayside Tissue Bank', 'Stomach adenocarcinoma', 'NCH'),
'ZR': ('Tayside Tissue Bank', 'Esophageal carcinoma ', 'NCH'),
'ZS': ('Tayside Tissue Bank', 'Liver hepatocellular carcinoma', 'NCH'),
'ZT': ('International Genomics Consortium', 'Thymoma', 'NCH'),
'ZU': ('Spectrum Health', 'Cholangiocarcinoma', 'NCH'),
'ZW': ('University of Alabama', 'Pancreatic adenocarcinoma', 'NCH'),
'ZX': ('University of Alabama', 'Cervical squamous cell carcinoma and endocervical adenocarcinoma', 'NCH'),
}
# Maps the two-digit sample-type code (as used in TCGA-style barcodes) to a
# tuple of (human-readable sample description, short letter code).
# Codes 01-09 are tumor-derived samples, 10-14 normals, 20 controls,
# 40/50/60/61 special sample classes — as listed literally below.
SAMPLE_TYPE = {
    '01': ('Primary solid Tumor', 'TP'),
    '02': ('Recurrent Solid Tumor', 'TR'),
    '03': ('Primary Blood Derived Cancer - Peripheral Blood', 'TB'),
    '04': ('Recurrent Blood Derived Cancer - Bone Marrow', 'TRBM'),
    '05': ('Additional - New Primary', 'TAP'),
    '06': ('Metastatic', 'TM'),
    '07': ('Additional Metastatic', 'TAM'),
    '08': ('Human Tumor Original Cells', 'THOC'),
    '09': ('Primary Blood Derived Cancer - Bone Marrow', 'TBM'),
    '10': ('Blood Derived Normal', 'NB'),
    '11': ('Solid Tissue Normal', 'NT'),
    '12': ('Buccal Cell Normal', 'NBC'),
    '13': ('EBV Immortalized Normal', 'NEBV'),
    '14': ('Bone Marrow Normal', 'NBM'),
    '20': ('Control Analyte', 'CELLC'),
    '40': ('Recurrent Blood Derived Cancer - Peripheral Blood', 'TRB'),
    '50': ('Cell Lines', 'CELL'),
    '60': ('Primary Xenograft Tissue', 'XP'),
    '61': ('Cell Line Derived Xenograft Tissue', 'XCL'),
}
| true | true |
f7314a0e4e81b092e4ad4e0a58faca277e3f6357 | 482 | py | Python | measurements/read-ds18b20.py | jazik/raspberry-pi-cottage | c82885db7b265b96e7c1126a0f7af89602cd72d5 | [
"MIT"
] | null | null | null | measurements/read-ds18b20.py | jazik/raspberry-pi-cottage | c82885db7b265b96e7c1126a0f7af89602cd72d5 | [
"MIT"
] | null | null | null | measurements/read-ds18b20.py | jazik/raspberry-pi-cottage | c82885db7b265b96e7c1126a0f7af89602cd72d5 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Read the current temperature from a single DS18B20 1-Wire sensor whose id
# is given as the sole command-line argument, and print it in Celsius.
import sys
from w1thermsensor import W1ThermSensor
# Exactly one argument (the sensor id) is required; otherwise show usage and exit.
if len(sys.argv) == 2:
    sensor_id = sys.argv[1]
else:
    print('usage: sudo ' + sys.argv[0] + ' <sensor id>')
    print('example: sudo ' + sys.argv[0] + ' 00000588806a - Read from an DS18B20 wiht id 00000588806a')
    sys.exit(1)
# Open the named DS18B20 device and read one temperature sample.
sensor = W1ThermSensor(W1ThermSensor.THERM_SENSOR_DS18B20, sensor_id)
temperature_in_celsius = sensor.get_temperature()
# Output format e.g. "Temp=21.5*" (one decimal place).
print('Temp={0:0.1f}*'.format(temperature_in_celsius))
| 28.352941 | 103 | 0.715768 |
# Read the current temperature from a single DS18B20 1-Wire sensor whose id
# is given as the sole command-line argument, and print it in Celsius.
import sys
from w1thermsensor import W1ThermSensor
# Exactly one argument (the sensor id) is required; otherwise show usage and exit.
if len(sys.argv) == 2:
    sensor_id = sys.argv[1]
else:
    print('usage: sudo ' + sys.argv[0] + ' <sensor id>')
    print('example: sudo ' + sys.argv[0] + ' 00000588806a - Read from an DS18B20 wiht id 00000588806a')
    sys.exit(1)
# Open the named DS18B20 device and read one temperature sample.
sensor = W1ThermSensor(W1ThermSensor.THERM_SENSOR_DS18B20, sensor_id)
temperature_in_celsius = sensor.get_temperature()
# Output format e.g. "Temp=21.5*" (one decimal place).
print('Temp={0:0.1f}*'.format(temperature_in_celsius))
| true | true |
f7314a6598cc9d802c8a25965be8b5c7206c8fd3 | 9,663 | py | Python | contrib/bitrpc/bitrpc.py | dev-x0/genesis-x | 7000c60241e5403b2201338cfcfa1a2c6b0453d0 | [
"MIT"
] | 15 | 2018-06-01T17:06:02.000Z | 2020-11-14T10:24:36.000Z | contrib/bitrpc/bitrpc.py | dev-x0/genesis-x | 7000c60241e5403b2201338cfcfa1a2c6b0453d0 | [
"MIT"
] | null | null | null | contrib/bitrpc/bitrpc.py | dev-x0/genesis-x | 7000c60241e5403b2201338cfcfa1a2c6b0453d0 | [
"MIT"
] | 15 | 2018-06-26T09:47:48.000Z | 2021-12-22T06:32:57.000Z | from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:6666")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:6666")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 28.588757 | 101 | 0.573424 | from jsonrpc import ServiceProxy
import sys
import string
import getpass
rpcuser = ""
rpcpass = ""
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:6666")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:6666")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| false | true |
f7314b6028b09e35ddb4a9b228cfb6aadcb1d26a | 939 | py | Python | dwi_ml/models/utils/fisher_von_mises.py | EmmaRenauld/dwi_ml | f2f776199dd886509d15520aa68099a8c870a233 | [
"MIT"
] | null | null | null | dwi_ml/models/utils/fisher_von_mises.py | EmmaRenauld/dwi_ml | f2f776199dd886509d15520aa68099a8c870a233 | [
"MIT"
] | null | null | null | dwi_ml/models/utils/fisher_von_mises.py | EmmaRenauld/dwi_ml | f2f776199dd886509d15520aa68099a8c870a233 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import torch
"""
The complete formulas and explanations are available in our doc:
https://dwi-ml.readthedocs.io/en/latest/formulas.html
"""
def fisher_von_mises_log_prob_vector(mus, kappa, targets):
log_c = np.log(kappa) - np.log(2 * np.pi) - np.log(np.exp(kappa) -
np.exp(-kappa))
log_prob = log_c + (kappa * (mus * targets).sum(axis=-1))
return log_prob
def fisher_von_mises_log_prob(mus, kappa, targets, eps=1e-6):
log_2pi = np.log(2 * np.pi).astype(np.float32)
# Add an epsilon in case kappa is too small (i.e. a uniform
# distribution)
log_diff_exp_kappa = torch.log(torch.exp(kappa) - torch.exp(-kappa) + eps)
log_c = torch.log(kappa) - log_2pi - log_diff_exp_kappa
batch_dot_product = torch.sum(mus * targets, dim=1)
log_prob = log_c + (kappa * batch_dot_product)
return log_prob
| 29.34375 | 78 | 0.644302 |
import numpy as np
import torch
def fisher_von_mises_log_prob_vector(mus, kappa, targets):
log_c = np.log(kappa) - np.log(2 * np.pi) - np.log(np.exp(kappa) -
np.exp(-kappa))
log_prob = log_c + (kappa * (mus * targets).sum(axis=-1))
return log_prob
def fisher_von_mises_log_prob(mus, kappa, targets, eps=1e-6):
log_2pi = np.log(2 * np.pi).astype(np.float32)
log_diff_exp_kappa = torch.log(torch.exp(kappa) - torch.exp(-kappa) + eps)
log_c = torch.log(kappa) - log_2pi - log_diff_exp_kappa
batch_dot_product = torch.sum(mus * targets, dim=1)
log_prob = log_c + (kappa * batch_dot_product)
return log_prob
| true | true |
f7314ba8e10dd03e076c71b94e712ad4b4b62c44 | 664 | py | Python | docs/src/additional_responses/tutorial004.py | patrickmckenna/fastapi | 9c3c9b6e78768374868d690bc05918d58481e880 | [
"MIT"
] | 2 | 2020-11-01T00:04:05.000Z | 2021-07-21T06:32:20.000Z | docs/src/additional_responses/tutorial004.py | patrickmckenna/fastapi | 9c3c9b6e78768374868d690bc05918d58481e880 | [
"MIT"
] | 1 | 2019-11-02T22:03:59.000Z | 2019-11-02T22:03:59.000Z | docs/src/additional_responses/tutorial004.py | patrickmckenna/fastapi | 9c3c9b6e78768374868d690bc05918d58481e880 | [
"MIT"
] | 1 | 2020-12-19T18:01:20.000Z | 2020-12-19T18:01:20.000Z | from fastapi import FastAPI
from pydantic import BaseModel
from starlette.responses import FileResponse
class Item(BaseModel):
id: str
value: str
responses = {
404: {"description": "Item not found"},
302: {"description": "The item was moved"},
403: {"description": "Not enough privileges"},
}
app = FastAPI()
@app.get(
"/items/{item_id}",
response_model=Item,
responses={**responses, 200: {"content": {"image/png": {}}}},
)
async def read_item(item_id: str, img: bool = None):
if img:
return FileResponse("image.png", media_type="image/png")
else:
return {"id": "foo", "value": "there goes my hero"}
| 21.419355 | 65 | 0.637048 | from fastapi import FastAPI
from pydantic import BaseModel
from starlette.responses import FileResponse
class Item(BaseModel):
id: str
value: str
responses = {
404: {"description": "Item not found"},
302: {"description": "The item was moved"},
403: {"description": "Not enough privileges"},
}
app = FastAPI()
@app.get(
"/items/{item_id}",
response_model=Item,
responses={**responses, 200: {"content": {"image/png": {}}}},
)
async def read_item(item_id: str, img: bool = None):
if img:
return FileResponse("image.png", media_type="image/png")
else:
return {"id": "foo", "value": "there goes my hero"}
| true | true |
f7314be85e0d5c52d7bd7dba9f799e44ab8fed6b | 138 | py | Python | Fango/accounts/apps.py | Niemzok/fango | 37484a11e8bfffb0f6fe451b74501e0ad825b215 | [
"MIT"
] | null | null | null | Fango/accounts/apps.py | Niemzok/fango | 37484a11e8bfffb0f6fe451b74501e0ad825b215 | [
"MIT"
] | null | null | null | Fango/accounts/apps.py | Niemzok/fango | 37484a11e8bfffb0f6fe451b74501e0ad825b215 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = 'Fango.accounts'
| 17.25 | 39 | 0.797101 | from __future__ import unicode_literals
from django.apps import AppConfig
class AccountsConfig(AppConfig):
name = 'Fango.accounts'
| true | true |
f7314cf1f7f6c192548e293ee5dab1afb019d2fc | 13,083 | py | Python | examples/annotation.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | examples/annotation.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | examples/annotation.py | quattro/numpyro | b7b6e937297ea47c55760446134f84fc82936a9d | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: Bayesian Models of Annotation
======================================
In this example, we run MCMC for various crowdsourced annotation models in [1].
All models have discrete latent variables. Under the hood, we enumerate over
(marginalize out) those discrete latent sites in inference. Those models have different
complexity so they are great refererences for those who are new to Pyro/NumPyro
enumeration mechanism. We recommend readers compare the implementations with the
corresponding plate diagrams in [1] to see how concise a Pyro/NumPyro program is.
The interested readers can also refer to [3] for more explanation about enumeration.
The data is taken from Table 1 of reference [2].
Currently, this example does not include postprocessing steps to deal with "Label
Switching" issue (mentioned in section 6.2 of [1]).
**References:**
1. Paun, S., Carpenter, B., Chamberlain, J., Hovy, D., Kruschwitz, U.,
and Poesio, M. (2018). "Comparing bayesian models of annotation"
(https://www.aclweb.org/anthology/Q18-1040/)
2. Dawid, A. P., and Skene, A. M. (1979).
"Maximum likelihood estimation of observer error‐rates using the EM algorithm"
3. "Inference with Discrete Latent Variables"
(http://pyro.ai/examples/enumeration.html)
"""
import argparse
import os
import numpy as np
from jax import nn, random, vmap
import jax.numpy as jnp
import numpyro
from numpyro import handlers
from numpyro.contrib.indexing import Vindex
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS, Predictive
from numpyro.infer.reparam import LocScaleReparam
def get_data():
"""
:return: a tuple of annotator indices and class indices. The first term has shape
`num_positions` whose entries take values from `0` to `num_annotators - 1`.
The second term has shape `num_items x num_positions` whose entries take values
from `0` to `num_classes - 1`.
"""
# NB: the first annotator assessed each item 3 times
positions = np.array([1, 1, 1, 2, 3, 4, 5])
annotations = np.array(
[
[1, 1, 1, 1, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 4],
[1, 1, 2, 2, 1, 2, 2],
[2, 2, 2, 3, 1, 2, 1],
[2, 2, 2, 3, 2, 2, 2],
[2, 2, 2, 3, 3, 2, 2],
[1, 2, 2, 2, 1, 1, 1],
[3, 3, 3, 3, 4, 3, 3],
[2, 2, 2, 2, 2, 2, 3],
[2, 3, 2, 2, 2, 2, 3],
[4, 4, 4, 4, 4, 4, 4],
[2, 2, 2, 3, 3, 4, 3],
[1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 3, 2, 1, 2],
[1, 2, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 1],
[2, 2, 2, 1, 3, 2, 2],
[2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 1],
[2, 2, 2, 3, 2, 2, 2],
[2, 2, 1, 2, 2, 2, 2],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[2, 3, 2, 2, 2, 2, 2],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 1, 1, 2, 1],
[1, 1, 1, 1, 1, 1, 1],
[3, 3, 3, 3, 2, 3, 3],
[1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 3, 2, 3, 2],
[4, 3, 3, 4, 3, 4, 3],
[2, 2, 1, 2, 2, 3, 2],
[2, 3, 2, 3, 2, 3, 3],
[3, 3, 3, 3, 4, 3, 2],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 2, 1, 2, 1, 1, 1],
[2, 3, 2, 2, 2, 2, 2],
[1, 2, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2],
]
)
# we minus 1 because in Python, the first index is 0
return positions - 1, annotations - 1
def multinomial(annotations):
"""
This model corresponds to the plate diagram in Figure 1 of reference [1].
"""
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("class", num_classes):
zeta = numpyro.sample("zeta", dist.Dirichlet(jnp.ones(num_classes)))
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
with numpyro.plate("position", num_positions):
numpyro.sample("y", dist.Categorical(zeta[c]), obs=annotations)
def dawid_skene(positions, annotations):
"""
This model corresponds to the plate diagram in Figure 2 of reference [1].
"""
num_annotators = int(np.max(positions)) + 1
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("annotator", num_annotators, dim=-2):
with numpyro.plate("class", num_classes):
beta = numpyro.sample("beta", dist.Dirichlet(jnp.ones(num_classes)))
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
# here we use Vindex to allow broadcasting for the second index `c`
# ref: http://num.pyro.ai/en/latest/utilities.html#numpyro.contrib.indexing.vindex
with numpyro.plate("position", num_positions):
numpyro.sample(
"y", dist.Categorical(Vindex(beta)[positions, c, :]), obs=annotations
)
def mace(positions, annotations):
"""
This model corresponds to the plate diagram in Figure 3 of reference [1].
"""
num_annotators = int(np.max(positions)) + 1
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("annotator", num_annotators):
epsilon = numpyro.sample("epsilon", dist.Dirichlet(jnp.full(num_classes, 10)))
theta = numpyro.sample("theta", dist.Beta(0.5, 0.5))
with numpyro.plate("item", num_items, dim=-2):
# NB: using constant logits for discrete uniform prior
# (NumPyro does not have DiscreteUniform distribution yet)
c = numpyro.sample("c", dist.Categorical(logits=jnp.zeros(num_classes)))
with numpyro.plate("position", num_positions):
s = numpyro.sample("s", dist.Bernoulli(1 - theta[positions]))
probs = jnp.where(
s[..., None] == 0, nn.one_hot(c, num_classes), epsilon[positions]
)
numpyro.sample("y", dist.Categorical(probs), obs=annotations)
def hierarchical_dawid_skene(positions, annotations):
"""
This model corresponds to the plate diagram in Figure 4 of reference [1].
"""
num_annotators = int(np.max(positions)) + 1
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("class", num_classes):
# NB: we define `beta` as the `logits` of `y` likelihood; but `logits` is
# invariant up to a constant, so we'll follow [1]: fix the last term of `beta`
# to 0 and only define hyperpriors for the first `num_classes - 1` terms.
zeta = numpyro.sample(
"zeta", dist.Normal(0, 1).expand([num_classes - 1]).to_event(1)
)
omega = numpyro.sample(
"Omega", dist.HalfNormal(1).expand([num_classes - 1]).to_event(1)
)
with numpyro.plate("annotator", num_annotators, dim=-2):
with numpyro.plate("class", num_classes):
# non-centered parameterization
with handlers.reparam(config={"beta": LocScaleReparam(0)}):
beta = numpyro.sample("beta", dist.Normal(zeta, omega).to_event(1))
# pad 0 to the last item
beta = jnp.pad(beta, [(0, 0)] * (jnp.ndim(beta) - 1) + [(0, 1)])
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
with numpyro.plate("position", num_positions):
logits = Vindex(beta)[positions, c, :]
numpyro.sample("y", dist.Categorical(logits=logits), obs=annotations)
def item_difficulty(annotations):
"""
This model corresponds to the plate diagram in Figure 5 of reference [1].
"""
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("class", num_classes):
eta = numpyro.sample(
"eta", dist.Normal(0, 1).expand([num_classes - 1]).to_event(1)
)
chi = numpyro.sample(
"Chi", dist.HalfNormal(1).expand([num_classes - 1]).to_event(1)
)
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
with handlers.reparam(config={"theta": LocScaleReparam(0)}):
theta = numpyro.sample("theta", dist.Normal(eta[c], chi[c]).to_event(1))
theta = jnp.pad(theta, [(0, 0)] * (jnp.ndim(theta) - 1) + [(0, 1)])
with numpyro.plate("position", annotations.shape[-1]):
numpyro.sample("y", dist.Categorical(logits=theta), obs=annotations)
def logistic_random_effects(positions, annotations):
"""
This model corresponds to the plate diagram in Figure 5 of reference [1].
"""
num_annotators = int(np.max(positions)) + 1
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("class", num_classes):
zeta = numpyro.sample(
"zeta", dist.Normal(0, 1).expand([num_classes - 1]).to_event(1)
)
omega = numpyro.sample(
"Omega", dist.HalfNormal(1).expand([num_classes - 1]).to_event(1)
)
chi = numpyro.sample(
"Chi", dist.HalfNormal(1).expand([num_classes - 1]).to_event(1)
)
with numpyro.plate("annotator", num_annotators, dim=-2):
with numpyro.plate("class", num_classes):
with handlers.reparam(config={"beta": LocScaleReparam(0)}):
beta = numpyro.sample("beta", dist.Normal(zeta, omega).to_event(1))
beta = jnp.pad(beta, [(0, 0)] * (jnp.ndim(beta) - 1) + [(0, 1)])
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
with handlers.reparam(config={"theta": LocScaleReparam(0)}):
theta = numpyro.sample("theta", dist.Normal(0, chi[c]).to_event(1))
theta = jnp.pad(theta, [(0, 0)] * (jnp.ndim(theta) - 1) + [(0, 1)])
with numpyro.plate("position", num_positions):
logits = Vindex(beta)[positions, c, :] - theta
numpyro.sample("y", dist.Categorical(logits=logits), obs=annotations)
NAME_TO_MODEL = {
"mn": multinomial,
"ds": dawid_skene,
"mace": mace,
"hds": hierarchical_dawid_skene,
"id": item_difficulty,
"lre": logistic_random_effects,
}
def main(args):
annotators, annotations = get_data()
model = NAME_TO_MODEL[args.model]
data = (
(annotations,)
if model in [multinomial, item_difficulty]
else (annotators, annotations)
)
mcmc = MCMC(
NUTS(model),
num_warmup=args.num_warmup,
num_samples=args.num_samples,
num_chains=args.num_chains,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True,
)
mcmc.run(random.PRNGKey(0), *data)
mcmc.print_summary()
posterior_samples = mcmc.get_samples()
predictive = Predictive(model, posterior_samples, infer_discrete=True)
discrete_samples = predictive(random.PRNGKey(1), *data)
item_class = vmap(lambda x: jnp.bincount(x, length=4), in_axes=1)(
discrete_samples["c"].squeeze(-1)
)
print("Histogram of the predicted class of each item:")
row_format = "{:>10}" * 5
print(row_format.format("", *["c={}".format(i) for i in range(4)]))
for i, row in enumerate(item_class):
print(row_format.format(f"item[{i}]", *row))
if __name__ == "__main__":
assert numpyro.__version__.startswith("0.7.2")
parser = argparse.ArgumentParser(description="Bayesian Models of Annotation")
parser.add_argument("-n", "--num-samples", nargs="?", default=1000, type=int)
parser.add_argument("--num-warmup", nargs="?", default=1000, type=int)
parser.add_argument("--num-chains", nargs="?", default=1, type=int)
parser.add_argument(
"--model",
nargs="?",
default="ds",
help='one of "mn" (multinomial), "ds" (dawid_skene), "mace",'
' "hds" (hierarchical_dawid_skene),'
' "id" (item_difficulty), "lre" (logistic_random_effects)',
)
parser.add_argument("--device", default="cpu", type=str, help='use "cpu" or "gpu".')
args = parser.parse_args()
numpyro.set_platform(args.device)
numpyro.set_host_device_count(args.num_chains)
main(args)
| 37.38 | 90 | 0.591072 |
import argparse
import os
import numpy as np
from jax import nn, random, vmap
import jax.numpy as jnp
import numpyro
from numpyro import handlers
from numpyro.contrib.indexing import Vindex
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS, Predictive
from numpyro.infer.reparam import LocScaleReparam
def get_data():
positions = np.array([1, 1, 1, 2, 3, 4, 5])
annotations = np.array(
[
[1, 1, 1, 1, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 4],
[1, 1, 2, 2, 1, 2, 2],
[2, 2, 2, 3, 1, 2, 1],
[2, 2, 2, 3, 2, 2, 2],
[2, 2, 2, 3, 3, 2, 2],
[1, 2, 2, 2, 1, 1, 1],
[3, 3, 3, 3, 4, 3, 3],
[2, 2, 2, 2, 2, 2, 3],
[2, 3, 2, 2, 2, 2, 3],
[4, 4, 4, 4, 4, 4, 4],
[2, 2, 2, 3, 3, 4, 3],
[1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 3, 2, 1, 2],
[1, 2, 1, 1, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 1],
[2, 2, 2, 1, 3, 2, 2],
[2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 2, 2, 2, 1],
[2, 2, 2, 3, 2, 2, 2],
[2, 2, 1, 2, 2, 2, 2],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[2, 3, 2, 2, 2, 2, 2],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 2, 1, 1, 2, 1],
[1, 1, 1, 1, 1, 1, 1],
[3, 3, 3, 3, 2, 3, 3],
[1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2],
[2, 2, 2, 3, 2, 3, 2],
[4, 3, 3, 4, 3, 4, 3],
[2, 2, 1, 2, 2, 3, 2],
[2, 3, 2, 3, 2, 3, 3],
[3, 3, 3, 3, 4, 3, 2],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 2, 1, 2, 1, 1, 1],
[2, 3, 2, 2, 2, 2, 2],
[1, 2, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2],
]
)
return positions - 1, annotations - 1
def multinomial(annotations):
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("class", num_classes):
zeta = numpyro.sample("zeta", dist.Dirichlet(jnp.ones(num_classes)))
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
with numpyro.plate("position", num_positions):
numpyro.sample("y", dist.Categorical(zeta[c]), obs=annotations)
def dawid_skene(positions, annotations):
num_annotators = int(np.max(positions)) + 1
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("annotator", num_annotators, dim=-2):
with numpyro.plate("class", num_classes):
beta = numpyro.sample("beta", dist.Dirichlet(jnp.ones(num_classes)))
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
ition", num_positions):
numpyro.sample(
"y", dist.Categorical(Vindex(beta)[positions, c, :]), obs=annotations
)
def mace(positions, annotations):
num_annotators = int(np.max(positions)) + 1
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("annotator", num_annotators):
epsilon = numpyro.sample("epsilon", dist.Dirichlet(jnp.full(num_classes, 10)))
theta = numpyro.sample("theta", dist.Beta(0.5, 0.5))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(logits=jnp.zeros(num_classes)))
with numpyro.plate("position", num_positions):
s = numpyro.sample("s", dist.Bernoulli(1 - theta[positions]))
probs = jnp.where(
s[..., None] == 0, nn.one_hot(c, num_classes), epsilon[positions]
)
numpyro.sample("y", dist.Categorical(probs), obs=annotations)
def hierarchical_dawid_skene(positions, annotations):
num_annotators = int(np.max(positions)) + 1
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("class", num_classes):
# to 0 and only define hyperpriors for the first `num_classes - 1` terms.
zeta = numpyro.sample(
"zeta", dist.Normal(0, 1).expand([num_classes - 1]).to_event(1)
)
omega = numpyro.sample(
"Omega", dist.HalfNormal(1).expand([num_classes - 1]).to_event(1)
)
with numpyro.plate("annotator", num_annotators, dim=-2):
with numpyro.plate("class", num_classes):
# non-centered parameterization
with handlers.reparam(config={"beta": LocScaleReparam(0)}):
beta = numpyro.sample("beta", dist.Normal(zeta, omega).to_event(1))
# pad 0 to the last item
beta = jnp.pad(beta, [(0, 0)] * (jnp.ndim(beta) - 1) + [(0, 1)])
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
with numpyro.plate("position", num_positions):
logits = Vindex(beta)[positions, c, :]
numpyro.sample("y", dist.Categorical(logits=logits), obs=annotations)
def item_difficulty(annotations):
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("class", num_classes):
eta = numpyro.sample(
"eta", dist.Normal(0, 1).expand([num_classes - 1]).to_event(1)
)
chi = numpyro.sample(
"Chi", dist.HalfNormal(1).expand([num_classes - 1]).to_event(1)
)
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
with handlers.reparam(config={"theta": LocScaleReparam(0)}):
theta = numpyro.sample("theta", dist.Normal(eta[c], chi[c]).to_event(1))
theta = jnp.pad(theta, [(0, 0)] * (jnp.ndim(theta) - 1) + [(0, 1)])
with numpyro.plate("position", annotations.shape[-1]):
numpyro.sample("y", dist.Categorical(logits=theta), obs=annotations)
def logistic_random_effects(positions, annotations):
num_annotators = int(np.max(positions)) + 1
num_classes = int(np.max(annotations)) + 1
num_items, num_positions = annotations.shape
with numpyro.plate("class", num_classes):
zeta = numpyro.sample(
"zeta", dist.Normal(0, 1).expand([num_classes - 1]).to_event(1)
)
omega = numpyro.sample(
"Omega", dist.HalfNormal(1).expand([num_classes - 1]).to_event(1)
)
chi = numpyro.sample(
"Chi", dist.HalfNormal(1).expand([num_classes - 1]).to_event(1)
)
with numpyro.plate("annotator", num_annotators, dim=-2):
with numpyro.plate("class", num_classes):
with handlers.reparam(config={"beta": LocScaleReparam(0)}):
beta = numpyro.sample("beta", dist.Normal(zeta, omega).to_event(1))
beta = jnp.pad(beta, [(0, 0)] * (jnp.ndim(beta) - 1) + [(0, 1)])
pi = numpyro.sample("pi", dist.Dirichlet(jnp.ones(num_classes)))
with numpyro.plate("item", num_items, dim=-2):
c = numpyro.sample("c", dist.Categorical(pi))
with handlers.reparam(config={"theta": LocScaleReparam(0)}):
theta = numpyro.sample("theta", dist.Normal(0, chi[c]).to_event(1))
theta = jnp.pad(theta, [(0, 0)] * (jnp.ndim(theta) - 1) + [(0, 1)])
with numpyro.plate("position", num_positions):
logits = Vindex(beta)[positions, c, :] - theta
numpyro.sample("y", dist.Categorical(logits=logits), obs=annotations)
NAME_TO_MODEL = {
"mn": multinomial,
"ds": dawid_skene,
"mace": mace,
"hds": hierarchical_dawid_skene,
"id": item_difficulty,
"lre": logistic_random_effects,
}
def main(args):
annotators, annotations = get_data()
model = NAME_TO_MODEL[args.model]
data = (
(annotations,)
if model in [multinomial, item_difficulty]
else (annotators, annotations)
)
mcmc = MCMC(
NUTS(model),
num_warmup=args.num_warmup,
num_samples=args.num_samples,
num_chains=args.num_chains,
progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True,
)
mcmc.run(random.PRNGKey(0), *data)
mcmc.print_summary()
posterior_samples = mcmc.get_samples()
predictive = Predictive(model, posterior_samples, infer_discrete=True)
discrete_samples = predictive(random.PRNGKey(1), *data)
item_class = vmap(lambda x: jnp.bincount(x, length=4), in_axes=1)(
discrete_samples["c"].squeeze(-1)
)
print("Histogram of the predicted class of each item:")
row_format = "{:>10}" * 5
print(row_format.format("", *["c={}".format(i) for i in range(4)]))
for i, row in enumerate(item_class):
print(row_format.format(f"item[{i}]", *row))
if __name__ == "__main__":
assert numpyro.__version__.startswith("0.7.2")
parser = argparse.ArgumentParser(description="Bayesian Models of Annotation")
parser.add_argument("-n", "--num-samples", nargs="?", default=1000, type=int)
parser.add_argument("--num-warmup", nargs="?", default=1000, type=int)
parser.add_argument("--num-chains", nargs="?", default=1, type=int)
parser.add_argument(
"--model",
nargs="?",
default="ds",
help='one of "mn" (multinomial), "ds" (dawid_skene), "mace",'
' "hds" (hierarchical_dawid_skene),'
' "id" (item_difficulty), "lre" (logistic_random_effects)',
)
parser.add_argument("--device", default="cpu", type=str, help='use "cpu" or "gpu".')
args = parser.parse_args()
numpyro.set_platform(args.device)
numpyro.set_host_device_count(args.num_chains)
main(args)
| true | true |
f7314e8a496b92d9bd05e6902788e7b9a672b0dc | 11,218 | py | Python | plugins/sqlfluff-templater-dbt/test/templater_test.py | WittierDinosaur/sqlfluff | edc4a2c47cd4f0a5f53dbde36e50da19ec08dda7 | [
"MIT"
] | null | null | null | plugins/sqlfluff-templater-dbt/test/templater_test.py | WittierDinosaur/sqlfluff | edc4a2c47cd4f0a5f53dbde36e50da19ec08dda7 | [
"MIT"
] | null | null | null | plugins/sqlfluff-templater-dbt/test/templater_test.py | WittierDinosaur/sqlfluff | edc4a2c47cd4f0a5f53dbde36e50da19ec08dda7 | [
"MIT"
] | null | null | null | """Tests for the dbt templater."""
import glob
import os
import pytest
import logging
from pathlib import Path
from sqlfluff.core import FluffConfig, Lexer, Linter
from sqlfluff.core.errors import SQLTemplaterSkipFile
from test.fixtures.dbt.templater import ( # noqa: F401
DBT_FLUFF_CONFIG,
dbt_templater,
project_dir,
)
def test__templater_dbt_missing(dbt_templater, project_dir):  # noqa: F811
    """Check that a nice error is returned when dbt module is missing."""
    # This test is only meaningful in an environment without dbt installed.
    dbt_available = True
    try:
        import dbt  # noqa: F401
    except ModuleNotFoundError:
        dbt_available = False
    if dbt_available:
        pytest.skip(msg="dbt is installed")

    # With dbt absent, processing must raise an import error that points the
    # user at the correct pip extra.
    with pytest.raises(ModuleNotFoundError, match=r"pip install sqlfluff\[dbt\]"):
        dbt_templater.process(
            in_str="",
            fname=os.path.join(project_dir, "models/my_new_project/test.sql"),
            config=FluffConfig(configs=DBT_FLUFF_CONFIG),
        )
def test__templater_dbt_profiles_dir_expanded(dbt_templater):  # noqa: F811
    """Check that the profiles_dir is expanded."""
    dbt_templater.sqlfluff_config = FluffConfig(
        configs={"templater": {"dbt": {"profiles_dir": "~/.dbt"}}}
    )
    expected = os.path.expanduser("~/.dbt")
    actual = dbt_templater._get_profiles_dir()
    # Normalise both sides so the comparison works across operating systems.
    assert os.path.normpath(actual) == os.path.normpath(expected)
@pytest.mark.parametrize(
    "fname",
    [
        # dbt_utils
        "use_dbt_utils.sql",
        # macro calling another macro
        "macro_in_macro.sql",
        # config.get(...)
        "use_headers.sql",
        # var(...)
        "use_var.sql",
        # {# {{ 1 + 2 }} #}
        "templated_inside_comment.sql",
        # {{ dbt_utils.last_day(
        "last_day.sql",
    ],
)
def test__templater_dbt_templating_result(
    project_dir, dbt_templater, fname  # noqa: F811
):
    """Test that input sql file gets templated into output sql file."""
    templated_file, _ = dbt_templater.process(
        in_str="",
        fname=os.path.join(project_dir, "models/my_new_project/", fname),
        config=FluffConfig(configs=DBT_FLUFF_CONFIG),
    )
    # Read the expected fixture with a context manager so the file handle is
    # closed promptly (the previous bare open().read() leaked the handle and
    # triggered ResourceWarning under -W error).
    fixture_path = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/" + fname
    with open(fixture_path) as fixture:
        expected = fixture.read()
    assert str(templated_file) == expected
@pytest.mark.parametrize(
    "fnames_input, fnames_expected_sequence",
    [
        [
            (
                Path("models") / "depends_on_ephemeral" / "a.sql",
                Path("models") / "depends_on_ephemeral" / "b.sql",
                Path("models") / "depends_on_ephemeral" / "d.sql",
            ),
            # c.sql is not present in the original list and should not appear here,
            # even though b.sql depends on it. This test ensures that "out of scope"
            # files, e.g. those ignored using ".sqlfluffignore" or in directories
            # outside what was specified, are not inadvertently processed.
            (
                Path("models") / "depends_on_ephemeral" / "a.sql",
                Path("models") / "depends_on_ephemeral" / "b.sql",
                Path("models") / "depends_on_ephemeral" / "d.sql",
            ),
        ],
        [
            (
                Path("models") / "depends_on_ephemeral" / "a.sql",
                Path("models") / "depends_on_ephemeral" / "b.sql",
                Path("models") / "depends_on_ephemeral" / "c.sql",
                Path("models") / "depends_on_ephemeral" / "d.sql",
            ),
            # c.sql should come before b.sql because b.sql depends on c.sql.
            # It also comes first overall because ephemeral models come first.
            (
                Path("models") / "depends_on_ephemeral" / "c.sql",
                Path("models") / "depends_on_ephemeral" / "a.sql",
                Path("models") / "depends_on_ephemeral" / "b.sql",
                Path("models") / "depends_on_ephemeral" / "d.sql",
            ),
        ],
    ],
)
def test__templater_dbt_sequence_files_ephemeral_dependency(
    project_dir, dbt_templater, fnames_input, fnames_expected_sequence  # noqa: F811
):
    """Test that dbt templater sequences files based on dependencies."""
    root = Path(project_dir)
    sequenced = dbt_templater.sequence_files(
        [str(root / fname) for fname in fnames_input],
        config=FluffConfig(configs=DBT_FLUFF_CONFIG),
    )
    # The templater must emit exactly the expected order, nothing added or
    # dropped.
    assert list(sequenced) == [
        str(root / fname) for fname in fnames_expected_sequence
    ]
@pytest.mark.parametrize(
    "raw_file,templated_file,result",
    [
        (
            "select * from a",
            """
with dbt__CTE__INTERNAL_test as (
select * from a
)select count(*) from dbt__CTE__INTERNAL_test
""",
            # The unwrapper should trim the ends.
            [
                ("literal", slice(0, 15, None), slice(0, 15, None)),
            ],
        )
    ],
)
def test__templater_dbt_slice_file_wrapped_test(
    raw_file, templated_file, result, dbt_templater, caplog  # noqa: F811
):
    """Test that wrapped queries are sliced safely using _check_for_wrapped()."""
    # Capture templater debug output while slicing so failures are diagnosable.
    with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"):
        _, sliced, _ = dbt_templater.slice_file(raw_file, templated_file)
    assert sliced == result
@pytest.mark.parametrize(
    "fname",
    [
        "tests/test.sql",
        "models/my_new_project/single_trailing_newline.sql",
        "models/my_new_project/multiple_trailing_newline.sql",
    ],
)
def test__templater_dbt_templating_test_lex(
    project_dir, dbt_templater, fname  # noqa: F811
):
    """A test to demonstrate the lexer works on both dbt models (with any # of trailing newlines) and dbt tests."""
    source_fpath = os.path.join(project_dir, fname)
    with open(source_fpath, "r") as source_dbt_model:
        source_dbt_sql = source_dbt_model.read()
    # Count the trailing newlines so we can assert they survive templating.
    n_trailing_newlines = len(source_dbt_sql) - len(source_dbt_sql.rstrip("\n"))
    lexer = Lexer(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    templated_file, _ = dbt_templater.process(
        in_str="",
        fname=source_fpath,
        config=FluffConfig(configs=DBT_FLUFF_CONFIG),
    )
    # Lexing is exercised only for its side effects (it must not raise); the
    # return values were previously bound to unused locals.
    lexer.lex(templated_file)
    expected = "select a\nfrom table_a" + "\n" * n_trailing_newlines
    assert templated_file.source_str == expected
    assert templated_file.templated_str == expected
def test__templater_dbt_skips_disabled_model(dbt_templater, project_dir):  # noqa: F811
    """A disabled dbt model should be skipped."""
    disabled_model = os.path.join(
        project_dir, "models/my_new_project/disabled_model.sql"
    )
    # Processing a disabled model must raise the dedicated "skip" exception.
    with pytest.raises(SQLTemplaterSkipFile, match=r"model was disabled"):
        dbt_templater.process(
            in_str="",
            fname=disabled_model,
            config=FluffConfig(configs=DBT_FLUFF_CONFIG),
        )
@pytest.mark.parametrize(
    "fname",
    [
        "use_var.sql",
        "incremental.sql",
        "single_trailing_newline.sql",
        "L034_test.sql",
    ],
)
def test__dbt_templated_models_do_not_raise_lint_error(
    project_dir, fname  # noqa: F811
):
    """Test that templated dbt models do not raise a linting error."""
    model_path = os.path.join(project_dir, "models/my_new_project/", fname)
    linter = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    lint_result = linter.lint_path(path=model_path)
    # A cleanly templated model must produce no violations at all.
    assert not lint_result.check_tuples()
@pytest.mark.parametrize(
    "path", ["models/my_new_project/issue_1608.sql", "snapshots/issue_1771.sql"]
)
def test__dbt_templated_models_fix_does_not_corrupt_file(
    project_dir, path  # noqa: F811
):
    """Test fix for issue 1608. Previously "sqlfluff fix" corrupted the file."""
    # Clear any leftover *FIXED.sql artifacts from a previous aborted run so
    # they cannot mask or pollute this run's output.
    for fsp in glob.glob(os.path.join(project_dir, "snapshots", "*FIXED.sql")):
        os.remove(fsp)
    lntr = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
    lnt = lntr.lint_path(os.path.join(project_dir, path), fix=True)
    try:
        # Write the fixed output next to the original using the "FIXED" suffix,
        # then compare it against the checked-in ".after" reference file.
        lnt.persist_changes(fixed_file_suffix="FIXED")
        with open(os.path.join(project_dir, path + ".after")) as f:
            comp_buff = f.read()
        with open(os.path.join(project_dir, path.replace(".sql", "FIXED.sql"))) as f:
            fixed_buff = f.read()
        assert fixed_buff == comp_buff
    finally:
        # Always remove the generated *FIXED.sql files, pass or fail.
        for fsp in glob.glob(os.path.join(project_dir, "snapshots", "*FIXED.sql")):
            os.remove(fsp)
def test__templater_dbt_templating_absolute_path(
    project_dir, dbt_templater  # noqa: F811
):
    """Test that absolute path of input path does not cause RuntimeError."""
    model_path = os.path.abspath(
        os.path.join(project_dir, "models/my_new_project/use_var.sql")
    )
    try:
        dbt_templater.process(
            in_str="",
            fname=model_path,
            config=FluffConfig(configs=DBT_FLUFF_CONFIG),
        )
    except Exception as e:
        # Any exception here is a regression; surface it as a test failure.
        pytest.fail(f"Unexpected RuntimeError: {e}")
@pytest.mark.parametrize(
    "fname,exception_msg",
    [
        (
            "compiler_error.sql",
            "dbt compilation error on file 'models/my_new_project/compiler_error.sql', "
            "Unexpected end of template. Jinja was looking for the following tags: 'endfor'",
        ),
        ("exception_connect_database.sql", "dbt tried to connect to the database"),
    ],
)
def test__templater_dbt_handle_exceptions(
    project_dir, dbt_templater, fname, exception_msg  # noqa: F811
):
    """Test that exceptions during compilation are returned as violation."""
    # Imported lazily so merely collecting this module does not require dbt.
    from dbt.adapters.factory import get_adapter
    src_fpath = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/" + fname
    target_fpath = os.path.abspath(
        os.path.join(project_dir, "models/my_new_project/", fname)
    )
    # We move the file that throws an error in and out of the project directory
    # as dbt throws an error if a node fails to parse while computing the DAG
    os.rename(src_fpath, target_fpath)
    try:
        _, violations = dbt_templater.process(
            in_str="",
            fname=target_fpath,
            config=FluffConfig(configs=DBT_FLUFF_CONFIG),
        )
    finally:
        # Release the adapter's DB connection before moving the file back so
        # the fixture tree is restored even when processing fails.
        get_adapter(dbt_templater.dbt_config).connections.release()
        os.rename(target_fpath, src_fpath)
    assert violations
    # NB: Replace slashes to deal with different plaform paths being returned.
    assert violations[0].desc().replace("\\", "/").startswith(exception_msg)
def test__project_dir_does_not_exist_error(dbt_templater, caplog):  # noqa: F811
    """Test that an error is logged if the specified dbt project directory doesn't exist."""
    dbt_templater.sqlfluff_config = FluffConfig(
        configs={"templater": {"dbt": {"project_dir": "./non_existing_directory"}}}
    )
    logger = logging.getLogger("sqlfluff")
    saved_propagate = logger.propagate
    try:
        # Propagation must be on for caplog to see the templater's record.
        logger.propagate = True
        with caplog.at_level(logging.ERROR, logger="sqlfluff.templater"):
            dbt_project_dir = dbt_templater._get_project_dir()
        expected = (
            f"dbt_project_dir: {dbt_project_dir} could not be accessed. Check it exists."
        )
        assert expected in caplog.text
    finally:
        # Restore the logger exactly as we found it.
        logger.propagate = saved_propagate
| 35.5 | 115 | 0.639151 |
import glob
import os
import pytest
import logging
from pathlib import Path
from sqlfluff.core import FluffConfig, Lexer, Linter
from sqlfluff.core.errors import SQLTemplaterSkipFile
from test.fixtures.dbt.templater import (
DBT_FLUFF_CONFIG,
dbt_templater,
project_dir,
)
def test__templater_dbt_missing(dbt_templater, project_dir):
try:
import dbt
pytest.skip(msg="dbt is installed")
except ModuleNotFoundError:
pass
with pytest.raises(ModuleNotFoundError, match=r"pip install sqlfluff\[dbt\]"):
dbt_templater.process(
in_str="",
fname=os.path.join(project_dir, "models/my_new_project/test.sql"),
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
def test__templater_dbt_profiles_dir_expanded(dbt_templater):
dbt_templater.sqlfluff_config = FluffConfig(
configs={"templater": {"dbt": {"profiles_dir": "~/.dbt"}}}
)
profiles_dir = dbt_templater._get_profiles_dir()
assert os.path.normpath(profiles_dir) == os.path.normpath(
os.path.expanduser("~/.dbt")
)
@pytest.mark.parametrize(
"fname",
[
"use_dbt_utils.sql",
"macro_in_macro.sql",
"use_headers.sql",
"use_var.sql",
        "templated_inside_comment.sql",
"last_day.sql",
],
)
def test__templater_dbt_templating_result(
project_dir, dbt_templater, fname
):
templated_file, _ = dbt_templater.process(
in_str="",
fname=os.path.join(project_dir, "models/my_new_project/", fname),
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
assert (
str(templated_file)
== open("plugins/sqlfluff-templater-dbt/test/fixtures/dbt/" + fname).read()
)
@pytest.mark.parametrize(
"fnames_input, fnames_expected_sequence",
[
[
(
Path("models") / "depends_on_ephemeral" / "a.sql",
Path("models") / "depends_on_ephemeral" / "b.sql",
Path("models") / "depends_on_ephemeral" / "d.sql",
),
(
Path("models") / "depends_on_ephemeral" / "a.sql",
Path("models") / "depends_on_ephemeral" / "b.sql",
Path("models") / "depends_on_ephemeral" / "d.sql",
),
],
[
(
Path("models") / "depends_on_ephemeral" / "a.sql",
Path("models") / "depends_on_ephemeral" / "b.sql",
Path("models") / "depends_on_ephemeral" / "c.sql",
Path("models") / "depends_on_ephemeral" / "d.sql",
),
(
Path("models") / "depends_on_ephemeral" / "c.sql",
Path("models") / "depends_on_ephemeral" / "a.sql",
Path("models") / "depends_on_ephemeral" / "b.sql",
Path("models") / "depends_on_ephemeral" / "d.sql",
),
],
],
)
def test__templater_dbt_sequence_files_ephemeral_dependency(
project_dir, dbt_templater, fnames_input, fnames_expected_sequence
):
result = dbt_templater.sequence_files(
[str(Path(project_dir) / fn) for fn in fnames_input],
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
pd = Path(project_dir)
expected = [str(pd / fn) for fn in fnames_expected_sequence]
assert list(result) == expected
@pytest.mark.parametrize(
"raw_file,templated_file,result",
[
(
"select * from a",
"""
with dbt__CTE__INTERNAL_test as (
select * from a
)select count(*) from dbt__CTE__INTERNAL_test
""",
[
("literal", slice(0, 15, None), slice(0, 15, None)),
],
)
],
)
def test__templater_dbt_slice_file_wrapped_test(
raw_file, templated_file, result, dbt_templater, caplog
):
with caplog.at_level(logging.DEBUG, logger="sqlfluff.templater"):
_, resp, _ = dbt_templater.slice_file(
raw_file,
templated_file,
)
assert resp == result
@pytest.mark.parametrize(
"fname",
[
"tests/test.sql",
"models/my_new_project/single_trailing_newline.sql",
"models/my_new_project/multiple_trailing_newline.sql",
],
)
def test__templater_dbt_templating_test_lex(
project_dir, dbt_templater, fname
):
source_fpath = os.path.join(project_dir, fname)
with open(source_fpath, "r") as source_dbt_model:
source_dbt_sql = source_dbt_model.read()
n_trailing_newlines = len(source_dbt_sql) - len(source_dbt_sql.rstrip("\n"))
lexer = Lexer(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
templated_file, _ = dbt_templater.process(
in_str="",
fname=os.path.join(project_dir, fname),
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
tokens, lex_vs = lexer.lex(templated_file)
assert (
templated_file.source_str
== "select a\nfrom table_a" + "\n" * n_trailing_newlines
)
assert (
templated_file.templated_str
== "select a\nfrom table_a" + "\n" * n_trailing_newlines
)
def test__templater_dbt_skips_disabled_model(dbt_templater, project_dir):
with pytest.raises(SQLTemplaterSkipFile, match=r"model was disabled"):
dbt_templater.process(
in_str="",
fname=os.path.join(project_dir, "models/my_new_project/disabled_model.sql"),
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
@pytest.mark.parametrize(
"fname",
[
"use_var.sql",
"incremental.sql",
"single_trailing_newline.sql",
"L034_test.sql",
],
)
def test__dbt_templated_models_do_not_raise_lint_error(
project_dir, fname
):
lntr = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
lnt = lntr.lint_path(
path=os.path.join(project_dir, "models/my_new_project/", fname)
)
violations = lnt.check_tuples()
assert len(violations) == 0
@pytest.mark.parametrize(
"path", ["models/my_new_project/issue_1608.sql", "snapshots/issue_1771.sql"]
)
def test__dbt_templated_models_fix_does_not_corrupt_file(
project_dir, path
):
for fsp in glob.glob(os.path.join(project_dir, "snapshots", "*FIXED.sql")):
os.remove(fsp)
lntr = Linter(config=FluffConfig(configs=DBT_FLUFF_CONFIG))
lnt = lntr.lint_path(os.path.join(project_dir, path), fix=True)
try:
lnt.persist_changes(fixed_file_suffix="FIXED")
with open(os.path.join(project_dir, path + ".after")) as f:
comp_buff = f.read()
with open(os.path.join(project_dir, path.replace(".sql", "FIXED.sql"))) as f:
fixed_buff = f.read()
assert fixed_buff == comp_buff
finally:
for fsp in glob.glob(os.path.join(project_dir, "snapshots", "*FIXED.sql")):
os.remove(fsp)
def test__templater_dbt_templating_absolute_path(
project_dir, dbt_templater
):
try:
dbt_templater.process(
in_str="",
fname=os.path.abspath(
os.path.join(project_dir, "models/my_new_project/use_var.sql")
),
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
except Exception as e:
pytest.fail(f"Unexpected RuntimeError: {e}")
@pytest.mark.parametrize(
"fname,exception_msg",
[
(
"compiler_error.sql",
"dbt compilation error on file 'models/my_new_project/compiler_error.sql', "
"Unexpected end of template. Jinja was looking for the following tags: 'endfor'",
),
("exception_connect_database.sql", "dbt tried to connect to the database"),
],
)
def test__templater_dbt_handle_exceptions(
project_dir, dbt_templater, fname, exception_msg
):
from dbt.adapters.factory import get_adapter
src_fpath = "plugins/sqlfluff-templater-dbt/test/fixtures/dbt/error_models/" + fname
target_fpath = os.path.abspath(
os.path.join(project_dir, "models/my_new_project/", fname)
)
os.rename(src_fpath, target_fpath)
try:
_, violations = dbt_templater.process(
in_str="",
fname=target_fpath,
config=FluffConfig(configs=DBT_FLUFF_CONFIG),
)
finally:
get_adapter(dbt_templater.dbt_config).connections.release()
os.rename(target_fpath, src_fpath)
assert violations
assert violations[0].desc().replace("\\", "/").startswith(exception_msg)
def test__project_dir_does_not_exist_error(dbt_templater, caplog):
dbt_templater.sqlfluff_config = FluffConfig(
configs={"templater": {"dbt": {"project_dir": "./non_existing_directory"}}}
)
logger = logging.getLogger("sqlfluff")
original_propagate_value = logger.propagate
try:
logger.propagate = True
with caplog.at_level(logging.ERROR, logger="sqlfluff.templater"):
dbt_project_dir = dbt_templater._get_project_dir()
assert (
f"dbt_project_dir: {dbt_project_dir} could not be accessed. Check it exists."
in caplog.text
)
finally:
logger.propagate = original_propagate_value
| true | true |
f7314e8b65485bdd5e00c5546138536c89f6ad88 | 819 | py | Python | classy_config/_util.py | fisher60/classy-config | abc8016f9fef328b1410ede75833429b05e20e1a | [
"MIT"
] | 7 | 2022-01-04T20:24:53.000Z | 2022-02-21T19:31:57.000Z | classy_config/_util.py | fisher60/classy-config | abc8016f9fef328b1410ede75833429b05e20e1a | [
"MIT"
] | 13 | 2022-01-04T18:53:08.000Z | 2022-02-25T11:01:29.000Z | classy_config/_util.py | fisher60/classy-config | abc8016f9fef328b1410ede75833429b05e20e1a | [
"MIT"
] | 1 | 2022-02-14T22:06:11.000Z | 2022-02-14T22:06:11.000Z | from typing import Any, MutableMapping, Optional
def merge_dicts(a: MutableMapping[str, Any], b: MutableMapping[str, Any], path: Optional[list] = None) -> MutableMapping[str, Any]:
    """
    Recursively merge the keys and values of ``b`` into ``a``.

    ``a`` is modified in place and also returned for convenience. Nested
    dicts are merged recursively; keys holding identical leaf values in
    both mappings are left untouched.

    :param a: Mapping to merge into (mutated in place).
    :param b: Mapping whose entries are merged into ``a``.
    :param path: Key path of the current recursion level; used only to build
        a readable error message. Callers should normally omit it.
    :return: ``a``, after merging.
    :raises ValueError: When both dicts assign the same key, with different values.
    """
    if path is None:
        path = []
    for key in b:
        if key not in a:
            a[key] = b[key]
            continue
        if isinstance(a[key], dict) and isinstance(b[key], dict):
            # Both sides are dicts: merge one level deeper, extending the path
            # so a conflict reports its full dotted location.
            merge_dicts(a[key], b[key], path + [str(key)])
        elif a[key] == b[key]:
            pass  # same leaf value, nothing to do
        else:
            raise ValueError(f"Conflict at {'.'.join(path + [str(key)])}")
    return a
| 26.419355 | 131 | 0.566545 | from typing import Any, MutableMapping, Optional
def merge_dicts(a: MutableMapping[str, Any], b: MutableMapping[str, Any], path: Optional[list] = None) -> MutableMapping[str, Any]:
if path is None:
path = []
for key in b:
if key not in a:
a[key] = b[key]
continue
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dicts(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass
else:
raise ValueError(f"Conflict at {'.'.join(path + [str(key)])}")
return a
| true | true |
f7314ef4fe1b0652cfdde116056e0cf2ca53f56b | 1,996 | py | Python | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/vision_getandsaveimage.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | null | null | null | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/vision_getandsaveimage.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | null | null | null | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/vision_getandsaveimage.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | 1 | 2020-10-06T07:44:12.000Z | 2020-10-06T07:44:12.000Z | #! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Get an image. Display it and save it using PIL."""
import qi
import argparse
import sys
import time
import Image
def main(session):
    """
    First get an image, then show it on the screen with PIL.

    :param session: a connected qi.Session used to obtain NAOqi services.
    """
    # Get the service ALVideoDevice.
    video_service = session.service("ALVideoDevice")
    resolution = 2    # VGA
    colorSpace = 11   # RGB
    # Subscribe a named video client (trailing 5 is the requested frame rate
    # per the ALVideoDevice API); the returned handle identifies this client.
    videoClient = video_service.subscribe("python_client", resolution, colorSpace, 5)
    t0 = time.time()
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = video_service.getImageRemote(videoClient)
    t1 = time.time()
    # Time the image transfer.
    print "acquisition delay ", t1 - t0
    # Release the subscription as soon as we have our frame.
    video_service.unsubscribe(videoClient)
    # Now we work with the image returned and save it as a PNG using ImageDraw
    # package.
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]
    image_string = str(bytearray(array))
    # Create a PIL Image from our pixel array.
    im = Image.fromstring("RGB", (imageWidth, imageHeight), image_string)
    # Save the image.
    im.save("camImage.png", "PNG")
    im.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="127.0.0.1",
                        help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    args = parser.parse_args()
    # Connect a qi session to the robot; exit with a helpful message if the
    # connection cannot be established.
    session = qi.Session()
    try:
        session.connect("tcp://" + args.ip + ":" + str(args.port))
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    main(session)
| 27.342466 | 98 | 0.629259 |
"""Example: Get an image. Display it and save it using PIL."""
import qi
import argparse
import sys
import time
import Image
def main(session):
"""
First get an image, then show it on the screen with PIL.
"""
video_service = session.service("ALVideoDevice")
resolution = 2
colorSpace = 11
videoClient = video_service.subscribe("python_client", resolution, colorSpace, 5)
t0 = time.time()
naoImage = video_service.getImageRemote(videoClient)
t1 = time.time()
print "acquisition delay ", t1 - t0
video_service.unsubscribe(videoClient)
imageWidth = naoImage[0]
imageHeight = naoImage[1]
array = naoImage[6]
image_string = str(bytearray(array))
im = Image.fromstring("RGB", (imageWidth, imageHeight), image_string)
im.save("camImage.png", "PNG")
im.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="127.0.0.1",
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--port", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
session = qi.Session()
try:
session.connect("tcp://" + args.ip + ":" + str(args.port))
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
main(session)
| false | true |
f7314f70d97443e5927a361c54120beae4e4b7f5 | 9,594 | py | Python | cinder/common/config.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | null | null | null | cinder/common/config.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | null | null | null | cinder/common/config.py | rackerlabs/cinder | 4295ff0a64f781c3546f6c6e0816dbb8100133cb | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 NTT corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from cinder.i18n import _
# The single global config object shared by every cinder module.
CONF = cfg.CONF
logging.register_options(CONF)
# Core options that must be settable from the command line; registered as
# CLI opts below.
core_opts = [
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for cinder-api'),
    cfg.StrOpt('state_path',
               default='/var/lib/cinder',
               deprecated_name='pybasedir',
               help="Top-level directory for maintaining cinder's state"), ]
# Placeholder for CLI-only debug options; currently empty.
debug_opts = [
]
CONF.register_cli_opts(core_opts)
CONF.register_cli_opts(debug_opts)
# Options registered on the DEFAULT group, grouped roughly by concern:
# host identity, glance access, RPC topics, API deployment, manager class
# paths, availability zones and the privileged OpenStack account.
global_opts = [
    cfg.StrOpt('my_ip',
               default=netutils.get_my_ipv4(),
               help='IP address of this host'),
    # Glance (image service) connection options.
    cfg.StrOpt('glance_host',
               default='$my_ip',
               help='Default glance host name or IP'),
    cfg.IntOpt('glance_port',
               default=9292,
               min=1, max=65535,
               help='Default glance port'),
    cfg.ListOpt('glance_api_servers',
                default=['$glance_host:$glance_port'],
                help='A list of the glance API servers available to cinder '
                '([hostname|ip]:port)'),
    cfg.IntOpt('glance_api_version',
               default=1,
               help='Version of the glance API to use'),
    cfg.IntOpt('glance_num_retries',
               default=0,
               help='Number retries when downloading an image from glance'),
    cfg.BoolOpt('glance_api_insecure',
                default=False,
                help='Allow to perform insecure SSL (https) requests to '
                'glance'),
    cfg.BoolOpt('glance_api_ssl_compression',
                default=False,
                help='Enables or disables negotiation of SSL layer '
                'compression. In some cases disabling compression '
                'can improve data throughput, such as when high '
                'network bandwidth is available and you use '
                'compressed image formats like qcow2.'),
    cfg.StrOpt('glance_ca_certificates_file',
               help='Location of ca certificates file to use for glance '
               'client requests.'),
    cfg.IntOpt('glance_request_timeout',
               default=None,
               help='http/https timeout value for glance operations. If no '
               'value (None) is supplied here, the glanceclient default '
               'value is used.'),
    # RPC topics the cinder services listen on.
    cfg.StrOpt('scheduler_topic',
               default='cinder-scheduler',
               help='The topic that scheduler nodes listen on'),
    cfg.StrOpt('volume_topic',
               default='cinder-volume',
               help='The topic that volume nodes listen on'),
    cfg.StrOpt('backup_topic',
               default='cinder-backup',
               help='The topic that volume backup nodes listen on'),
    # API deployment toggles and extension loading.
    cfg.BoolOpt('enable_v1_api',
                default=True,
                help=_("DEPRECATED: Deploy v1 of the Cinder API.")),
    cfg.BoolOpt('enable_v2_api',
                default=True,
                help=_("Deploy v2 of the Cinder API.")),
    cfg.BoolOpt('api_rate_limit',
                default=True,
                help='Enables or disables rate limit of the API.'),
    cfg.ListOpt('osapi_volume_ext_list',
                default=[],
                help='Specify list of extensions to load when using osapi_'
                'volume_extension option with cinder.api.contrib.'
                'select_extensions'),
    cfg.MultiStrOpt('osapi_volume_extension',
                    default=['cinder.api.contrib.standard_extensions'],
                    help='osapi volume extension to load'),
    # Manager classes for each cinder service.
    cfg.StrOpt('volume_manager',
               default='cinder.volume.manager.VolumeManager',
               help='Full class name for the Manager for volume'),
    cfg.StrOpt('backup_manager',
               default='cinder.backup.manager.BackupManager',
               help='Full class name for the Manager for volume backup'),
    cfg.StrOpt('scheduler_manager',
               default='cinder.scheduler.manager.SchedulerManager',
               help='Full class name for the Manager for scheduler'),
    cfg.StrOpt('host',
               default=socket.gethostname(),
               help='Name of this node. This can be an opaque identifier. '
               'It is not necessarily a host name, FQDN, or IP address.'),
    # NOTE(vish): default to nova for compatibility with nova installs
    cfg.StrOpt('storage_availability_zone',
               default='nova',
               help='Availability zone of this node'),
    cfg.StrOpt('default_availability_zone',
               default=None,
               help='Default availability zone for new volumes. If not set, '
               'the storage_availability_zone option value is used as '
               'the default for new volumes.'),
    cfg.BoolOpt('allow_availability_zone_fallback',
                default=False,
                help='If the requested Cinder availability zone is '
                'unavailable, fall back to the value of '
                'default_availability_zone, then '
                'storage_availability_zone, instead of failing.'),
    cfg.StrOpt('default_volume_type',
               default=None,
               help='Default volume type to use'),
    cfg.StrOpt('volume_usage_audit_period',
               default='month',
               help='Time period for which to generate volume usages. '
               'The options are hour, day, month, or year.'),
    cfg.StrOpt('rootwrap_config',
               default='/etc/cinder/rootwrap.conf',
               help='Path to the rootwrap configuration file to use for '
               'running commands as root'),
    cfg.BoolOpt('monkey_patch',
                default=False,
                help='Enable monkey patching'),
    cfg.ListOpt('monkey_patch_modules',
                default=[],
                help='List of modules/decorators to monkey patch'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='Maximum time since last check-in for a service to be '
               'considered up'),
    cfg.StrOpt('volume_api_class',
               default='cinder.volume.api.API',
               help='The full class name of the volume API class to use'),
    cfg.StrOpt('backup_api_class',
               default='cinder.backup.api.API',
               help='The full class name of the volume backup API class'),
    cfg.StrOpt('auth_strategy',
               default='keystone',
               choices=['noauth', 'keystone', 'deprecated'],
               help='The strategy to use for auth. Supports noauth, keystone, '
               'and deprecated.'),
    cfg.ListOpt('enabled_backends',
                default=None,
                help='A list of backend names to use. These backend names '
                'should be backed by a unique [CONFIG] group '
                'with its options'),
    cfg.BoolOpt('no_snapshot_gb_quota',
                default=False,
                help='Whether snapshots count against gigabyte quota'),
    cfg.StrOpt('transfer_api_class',
               default='cinder.transfer.api.API',
               help='The full class name of the volume transfer API class'),
    cfg.StrOpt('replication_api_class',
               default='cinder.replication.api.API',
               help='The full class name of the volume replication API class'),
    cfg.StrOpt('consistencygroup_api_class',
               default='cinder.consistencygroup.api.API',
               help='The full class name of the consistencygroup API class'),
    # Privileged OpenStack account used for requests to other services.
    cfg.StrOpt('os_privileged_user_name',
               default=None,
               help='OpenStack privileged account username. Used for requests '
               'to other services (such as Nova) that require an account '
               'with special rights.'),
    cfg.StrOpt('os_privileged_user_password',
               default=None,
               help='Password associated with the OpenStack privileged '
               'account.',
               secret=True),
    cfg.StrOpt('os_privileged_user_tenant',
               default=None,
               help='Tenant name associated with the OpenStack privileged '
               'account.'),
    cfg.StrOpt('os_privileged_user_auth_url',
               default=None,
               help='Auth URL associated with the OpenStack privileged '
               'account.'),
]
CONF.register_opts(global_opts)
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils
from cinder.i18n import _
# Shared configuration entry point: every cinder module registers its
# options against this global ConfigOpts instance.
CONF = cfg.CONF
logging.register_options(CONF)
# Core CLI options that must be available before config files are parsed.
core_opts = [
    cfg.StrOpt('api_paste_config',
               default="api-paste.ini",
               help='File name for the paste.deploy config for cinder-api'),
    cfg.StrOpt('state_path',
               default='/var/lib/cinder',
               deprecated_name='pybasedir',
               help="Top-level directory for maintaining cinder's state"), ]
# Placeholder kept so debug-only CLI options can be added in one place.
debug_opts = [
]
CONF.register_cli_opts(core_opts)
CONF.register_cli_opts(debug_opts)
# Options shared by all cinder services (API, scheduler, volume, backup).
# Values like '$my_ip' are oslo.config substitutions resolved at access time.
global_opts = [
    cfg.StrOpt('my_ip',
               default=netutils.get_my_ipv4(),
               help='IP address of this host'),
    cfg.StrOpt('glance_host',
               default='$my_ip',
               help='Default glance host name or IP'),
    cfg.IntOpt('glance_port',
               default=9292,
               min=1, max=65535,
               help='Default glance port'),
    cfg.ListOpt('glance_api_servers',
                default=['$glance_host:$glance_port'],
                help='A list of the glance API servers available to cinder '
                     '([hostname|ip]:port)'),
    cfg.IntOpt('glance_api_version',
               default=1,
               help='Version of the glance API to use'),
    cfg.IntOpt('glance_num_retries',
               default=0,
               help='Number retries when downloading an image from glance'),
    cfg.BoolOpt('glance_api_insecure',
                default=False,
                help='Allow to perform insecure SSL (https) requests to '
                     'glance'),
    cfg.BoolOpt('glance_api_ssl_compression',
                default=False,
                help='Enables or disables negotiation of SSL layer '
                     'compression. In some cases disabling compression '
                     'can improve data throughput, such as when high '
                     'network bandwidth is available and you use '
                     'compressed image formats like qcow2.'),
    cfg.StrOpt('glance_ca_certificates_file',
               help='Location of ca certificates file to use for glance '
                    'client requests.'),
    cfg.IntOpt('glance_request_timeout',
               default=None,
               help='http/https timeout value for glance operations. If no '
                    'value (None) is supplied here, the glanceclient default '
                    'value is used.'),
    # RPC topics the individual cinder services listen on.
    cfg.StrOpt('scheduler_topic',
               default='cinder-scheduler',
               help='The topic that scheduler nodes listen on'),
    cfg.StrOpt('volume_topic',
               default='cinder-volume',
               help='The topic that volume nodes listen on'),
    cfg.StrOpt('backup_topic',
               default='cinder-backup',
               help='The topic that volume backup nodes listen on'),
    cfg.BoolOpt('enable_v1_api',
                default=True,
                help=_("DEPRECATED: Deploy v1 of the Cinder API.")),
    cfg.BoolOpt('enable_v2_api',
                default=True,
                help=_("Deploy v2 of the Cinder API.")),
    cfg.BoolOpt('api_rate_limit',
                default=True,
                help='Enables or disables rate limit of the API.'),
    cfg.ListOpt('osapi_volume_ext_list',
                default=[],
                help='Specify list of extensions to load when using osapi_'
                     'volume_extension option with cinder.api.contrib.'
                     'select_extensions'),
    cfg.MultiStrOpt('osapi_volume_extension',
                    default=['cinder.api.contrib.standard_extensions'],
                    help='osapi volume extension to load'),
    # Fully-qualified manager classes instantiated by each service.
    cfg.StrOpt('volume_manager',
               default='cinder.volume.manager.VolumeManager',
               help='Full class name for the Manager for volume'),
    cfg.StrOpt('backup_manager',
               default='cinder.backup.manager.BackupManager',
               help='Full class name for the Manager for volume backup'),
    cfg.StrOpt('scheduler_manager',
               default='cinder.scheduler.manager.SchedulerManager',
               help='Full class name for the Manager for scheduler'),
    cfg.StrOpt('host',
               default=socket.gethostname(),
               help='Name of this node. This can be an opaque identifier. '
                    'It is not necessarily a host name, FQDN, or IP address.'),
    # NOTE(vish): default to nova for compatibility with nova installs
    cfg.StrOpt('storage_availability_zone',
               default='nova',
               help='Availability zone of this node'),
    cfg.StrOpt('default_availability_zone',
               default=None,
               help='Default availability zone for new volumes. If not set, '
                    'the storage_availability_zone option value is used as '
                    'the default for new volumes.'),
    cfg.BoolOpt('allow_availability_zone_fallback',
                default=False,
                help='If the requested Cinder availability zone is '
                     'unavailable, fall back to the value of '
                     'default_availability_zone, then '
                     'storage_availability_zone, instead of failing.'),
    cfg.StrOpt('default_volume_type',
               default=None,
               help='Default volume type to use'),
    cfg.StrOpt('volume_usage_audit_period',
               default='month',
               help='Time period for which to generate volume usages. '
                    'The options are hour, day, month, or year.'),
    cfg.StrOpt('rootwrap_config',
               default='/etc/cinder/rootwrap.conf',
               help='Path to the rootwrap configuration file to use for '
                    'running commands as root'),
    cfg.BoolOpt('monkey_patch',
                default=False,
                help='Enable monkey patching'),
    cfg.ListOpt('monkey_patch_modules',
                default=[],
                help='List of modules/decorators to monkey patch'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='Maximum time since last check-in for a service to be '
                    'considered up'),
    # Pluggable API implementations; overridable for testing/alternatives.
    cfg.StrOpt('volume_api_class',
               default='cinder.volume.api.API',
               help='The full class name of the volume API class to use'),
    cfg.StrOpt('backup_api_class',
               default='cinder.backup.api.API',
               help='The full class name of the volume backup API class'),
    cfg.StrOpt('auth_strategy',
               default='keystone',
               choices=['noauth', 'keystone', 'deprecated'],
               help='The strategy to use for auth. Supports noauth, keystone, '
                    'and deprecated.'),
    cfg.ListOpt('enabled_backends',
                default=None,
                help='A list of backend names to use. These backend names '
                     'should be backed by a unique [CONFIG] group '
                     'with its options'),
    cfg.BoolOpt('no_snapshot_gb_quota',
                default=False,
                help='Whether snapshots count against gigabyte quota'),
    cfg.StrOpt('transfer_api_class',
               default='cinder.transfer.api.API',
               help='The full class name of the volume transfer API class'),
    cfg.StrOpt('replication_api_class',
               default='cinder.replication.api.API',
               help='The full class name of the volume replication API class'),
    cfg.StrOpt('consistencygroup_api_class',
               default='cinder.consistencygroup.api.API',
               help='The full class name of the consistencygroup API class'),
    # Credentials for a privileged service account used to call other
    # OpenStack services (e.g. Nova) that require elevated rights.
    cfg.StrOpt('os_privileged_user_name',
               default=None,
               help='OpenStack privileged account username. Used for requests '
                    'to other services (such as Nova) that require an account '
                    'with special rights.'),
    cfg.StrOpt('os_privileged_user_password',
               default=None,
               help='Password associated with the OpenStack privileged '
                    'account.',
               secret=True),
    cfg.StrOpt('os_privileged_user_tenant',
               default=None,
               help='Tenant name associated with the OpenStack privileged '
                    'account.'),
    cfg.StrOpt('os_privileged_user_auth_url',
               default=None,
               help='Auth URL associated with the OpenStack privileged '
                    'account.'),
]
CONF.register_opts(global_opts)
| true | true |
f7314f8b974c2b54dbcb6c11c5211b6c0d1d666e | 3,340 | py | Python | muranoclient/v1/services.py | mail2nsrajesh/python-muranoclient | 08411aa8d20993ac7c4a52b2aa0e73fb6fea4d40 | [
"Apache-2.0"
] | 27 | 2015-04-26T16:05:29.000Z | 2021-01-28T03:31:57.000Z | muranoclient/v1/services.py | mail2nsrajesh/python-muranoclient | 08411aa8d20993ac7c4a52b2aa0e73fb6fea4d40 | [
"Apache-2.0"
] | null | null | null | muranoclient/v1/services.py | mail2nsrajesh/python-muranoclient | 08411aa8d20993ac7c4a52b2aa0e73fb6fea4d40 | [
"Apache-2.0"
] | 14 | 2015-06-12T05:37:50.000Z | 2019-05-02T20:37:42.000Z | # Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import posixpath
from muranoclient.common import base
def normalize_path(f):
    """Decorator validating and normalizing a service ``path`` argument.

    The wrapped callable receives ``path`` either as its third positional
    argument or as a keyword argument.  The value must be an absolute
    unix-style path; the leading slash is stripped before delegation so
    the path can be appended directly to an environment URL.
    """
    @functools.wraps(f)
    def f_normalize_path(*args, **kwargs):
        positional = len(args) >= 3
        path = args[2] if positional else kwargs['path']
        if not posixpath.isabs(path):
            raise ValueError("Parameter 'path' should start with '/'")
        # Drop the leading '/' in whichever slot the caller used.
        if positional:
            args = list(args)
            args[2] = path[1:]
        else:
            kwargs['path'] = path[1:]
        return f(*args, **kwargs)
    return f_normalize_path
class Service(base.Resource):
    """A single application/service deployed inside an environment."""
    def __repr__(self):
        return '<Service %s>' % self._info
    def data(self, **kwargs):
        """Return the raw service payload via the owning manager."""
        return self.manager.data(self, **kwargs)
    def _add_details(self, info):
        """Expose each top-level key of a service dict as an attribute."""
        if not isinstance(info, dict):
            return
        for key, value in info.items():
            setattr(self, key, value)
class ServiceManager(base.Manager):
    """CRUD operations on services within a Murano environment."""

    resource_class = Service

    @staticmethod
    def _session_headers(session_id):
        """Headers for an *optional* configuration session."""
        if session_id:
            return {'X-Configuration-Session': session_id}
        return {}

    def list(self, environment_id, session_id=None):
        """List every service deployed in the environment."""
        url = "/v1/environments/{0}/services".format(environment_id)
        return self._list(url, headers=self._session_headers(session_id))

    @normalize_path
    def get(self, environment_id, path, session_id=None):
        """Fetch the service (or service subtree) located at *path*."""
        url = '/v1/environments/{0}/services/{1}'.format(environment_id, path)
        return self._get(url, headers=self._session_headers(session_id))

    @normalize_path
    def post(self, environment_id, path, data, session_id):
        """Create one service — or several, when *data* is a list."""
        headers = {'X-Configuration-Session': session_id}
        url = '/v1/environments/{0}/services/{1}'.format(environment_id, path)
        raw = self._create(url, data, headers=headers, return_raw=True)
        if not isinstance(raw, list):
            return self.resource_class(self, raw)
        return [self.resource_class(self, item) for item in raw]

    @normalize_path
    def put(self, environment_id, path, data, session_id):
        """Replace the service definition at *path*."""
        headers = {'X-Configuration-Session': session_id}
        url = '/v1/environments/{0}/services/{1}'.format(environment_id, path)
        return self._update(url, data, headers=headers)

    @normalize_path
    def delete(self, environment_id, path, session_id):
        """Remove the service at *path* within the given session."""
        headers = {'X-Configuration-Session': session_id}
        url = '/v1/environments/{0}/services/{1}'.format(environment_id, path)
        return self._delete(url, headers=headers)
| 32.745098 | 79 | 0.613473 |
import functools
import posixpath
from muranoclient.common import base
def normalize_path(f):
    """Decorator: validate that the ``path`` argument is absolute and
    strip its leading '/' before delegating to the wrapped callable.

    ``path`` is either the third positional argument or a keyword.
    Raises ValueError when the path is not absolute.
    """
    @functools.wraps(f)
    def f_normalize_path(*args, **kwargs):
        path = args[2] if len(args) >= 3 else kwargs['path']
        # path formally is just an absolute unix path
        if not posixpath.isabs(path):
            raise ValueError("Parameter 'path' should start with '/'")
        args = list(args)
        # Strip the leading '/' in whichever slot the caller used.
        if len(args) >= 3:
            args[2] = args[2][1:]
        else:
            kwargs['path'] = kwargs['path'][1:]
        return f(*args, **kwargs)
    return f_normalize_path
class Service(base.Resource):
    """A single application/service deployed inside an environment."""
    def __repr__(self):
        return '<Service %s>' % self._info
    def data(self, **kwargs):
        """Return the raw service payload via the owning manager."""
        return self.manager.data(self, **kwargs)
    def _add_details(self, info):
        """Expose each top-level key of a service dict as an attribute."""
        if isinstance(info, dict):
            for k, v in info.items():
                setattr(self, k, v)
class ServiceManager(base.Manager):
    """CRUD operations on services within a Murano environment."""
    resource_class = Service
    def list(self, environment_id, session_id=None):
        """List every service deployed in the environment."""
        # Session-scoped calls carry a configuration-session header.
        if session_id:
            headers = {'X-Configuration-Session': session_id}
        else:
            headers = {}
        return self._list("/v1/environments/{0}/services".
                          format(environment_id), headers=headers)
    @normalize_path
    def get(self, environment_id, path, session_id=None):
        """Fetch the service (or service subtree) located at *path*."""
        if session_id:
            headers = {'X-Configuration-Session': session_id}
        else:
            headers = {}
        return self._get('/v1/environments/{0}/services/{1}'.
                         format(environment_id, path), headers=headers)
    @normalize_path
    def post(self, environment_id, path, data, session_id):
        """Create one service — or several, when *data* is a list."""
        headers = {'X-Configuration-Session': session_id}
        result = self._create('/v1/environments/{0}/services/{1}'.
                              format(environment_id, path), data,
                              headers=headers, return_raw=True)
        # A list payload yields one resource object per element.
        if isinstance(result, list):
            return [self.resource_class(self, item) for item in result]
        else:
            return self.resource_class(self, result)
    @normalize_path
    def put(self, environment_id, path, data, session_id):
        """Replace the service definition at *path*."""
        headers = {'X-Configuration-Session': session_id}
        return self._update('/v1/environments/{0}/services/{1}'.
                            format(environment_id, path), data,
                            headers=headers)
    @normalize_path
    def delete(self, environment_id, path, session_id):
        """Remove the service at *path* within the given session."""
        headers = {'X-Configuration-Session': session_id}
        path = '/v1/environments/{0}/services/{1}'.format(environment_id, path)
        return self._delete(path, headers=headers)
| true | true |
f7314fe04d0a36817a1cc7e4b30f2ff6ab6dfec8 | 871 | py | Python | conflowgen/tests/domain_models/distribution_model_seeder/test_container_weight_distribution_seeder.py | bbargstaedt/conflowgen | b5b5c0e2df8a605d23ef467aaa3e88aa463a34ee | [
"MIT"
] | 5 | 2022-02-16T11:44:42.000Z | 2022-02-24T20:02:17.000Z | conflowgen/tests/domain_models/distribution_model_seeder/test_container_weight_distribution_seeder.py | bbargstaedt/conflowgen | b5b5c0e2df8a605d23ef467aaa3e88aa463a34ee | [
"MIT"
] | 90 | 2021-12-08T14:05:44.000Z | 2022-03-24T08:53:31.000Z | conflowgen/tests/domain_models/distribution_model_seeder/test_container_weight_distribution_seeder.py | bbargstaedt/conflowgen | b5b5c0e2df8a605d23ef467aaa3e88aa463a34ee | [
"MIT"
] | 5 | 2021-12-07T16:05:15.000Z | 2022-02-16T08:24:07.000Z | """
Check if container weights can be properly seeded.
"""
import unittest
from conflowgen.domain_models.distribution_models.container_weight_distribution import ContainerWeightDistribution
from conflowgen.domain_models.distribution_seeders import container_weight_distribution_seeder
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestContainerWeightDistributionSeeder(unittest.TestCase):
    """Exercise seeding of the container weight distribution table.

    The actual ModeOfTransportField behavior is implemented in peewee.
    """

    def setUp(self) -> None:
        """Prepare an in-memory SQLite database with the required table."""
        database = setup_sqlite_in_memory_db()
        database.create_tables([ContainerWeightDistribution])

    def test_seeding(self):
        """Seeding must complete without raising any exception."""
        container_weight_distribution_seeder.seed()
| 32.259259 | 114 | 0.766935 |
import unittest
from conflowgen.domain_models.distribution_models.container_weight_distribution import ContainerWeightDistribution
from conflowgen.domain_models.distribution_seeders import container_weight_distribution_seeder
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestContainerWeightDistributionSeeder(unittest.TestCase):
    """Check that container weights can be properly seeded."""
    def setUp(self) -> None:
        """Create the container weight table in an in-memory database."""
        sqlite_db = setup_sqlite_in_memory_db()
        sqlite_db.create_tables([
            ContainerWeightDistribution
        ])
    def test_seeding(self):
        """Seeding should simply not throw any exception."""
        container_weight_distribution_seeder.seed()
| true | true |
f73150728d21634b3692b32fa17efb6464b8c3ef | 2,110 | py | Python | download_paper.py | xiangze/CSpaperTopicViewer | f98bfc3d8771b50448867b15b723ab6af8e6d321 | [
"WTFPL"
] | 1 | 2016-07-10T23:51:12.000Z | 2016-07-10T23:51:12.000Z | download_paper.py | xiangze/cvprpapers | f98bfc3d8771b50448867b15b723ab6af8e6d321 | [
"WTFPL"
] | null | null | null | download_paper.py | xiangze/cvprpapers | f98bfc3d8771b50448867b15b723ab6af8e6d321 | [
"WTFPL"
] | 1 | 2016-08-02T06:34:37.000Z | 2016-08-02T06:34:37.000Z | import httplib2
from bs4 import BeautifulSoup, SoupStrainer
import urllib.request, urllib.error
import os
import re
import sys
def get(url):
    """Fetch *url* and return the response body.

    Responses are cached on disk under ".cache"; SSL certificate
    validation is deliberately disabled (some paper mirrors have
    broken certificates).
    """
    client = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
    status, body = client.request(url)
    return body
def getlinks(url):
    """Return all <a> elements parsed from the page at *url*."""
    anchors_only = SoupStrainer('a')
    return BeautifulSoup(get(url), "html.parser", parseOnlyThese=anchors_only)
def pdfname(file_url, save_folder):
    """Map a file URL to its local destination path inside *save_folder*."""
    # rfind returns -1 when there is no slash, so slicing from 0 keeps
    # the whole string — bare filenames work unchanged.
    basename = file_url[file_url.rfind("/") + 1:]
    return save_folder + "/" + basename
def savepdf(link, base_url, save_folder):
    """Download *base_url* + *link* into *save_folder*.

    Placeholder links ("#") and links not ending in "pdf" are ignored,
    as are files that already exist locally (cheap resume support).
    """
    if link == "#" or not link.endswith('pdf'):
        return
    outfilename = pdfname(link, save_folder)
    if os.path.exists(outfilename):
        return
    pdf = urllib.request.urlopen(base_url + link).read()
    with open(outfilename, 'wb') as f:
        f.write(pdf)
# ---------------------------------------------------------------------------
# Script entry point.
# Usage: python download_paper.py [year] [conference]
#   conference is one of: cvpr (default), iccv, nips
# Downloads every paper PDF into ./<conference><year>/.
# ---------------------------------------------------------------------------
year = 2016
conference = "cvpr"
argc = len(sys.argv)
if argc > 1:
    year = int(sys.argv[1])
if argc > 2:
    conference = sys.argv[2]
save_folder = conference + str(year)
if not os.path.exists(save_folder):
    os.mkdir(save_folder)
if conference == "cvpr":
    base_url = 'https://openaccess.thecvf.com/'
    url = base_url + 'CVPR%d?day=all' % year
    links = getlinks(url)
    for link in links:
        # Bug fix: Tag.has_key() was removed in BeautifulSoup 4 (and this
        # file is Python 3); has_attr() is the supported attribute test.
        if link.has_attr('href'):
            savepdf(link['href'], base_url, save_folder)
elif conference == "iccv":
    base_url = 'https://openaccess.thecvf.com/'
    links = getlinks(base_url + 'ICCV%d' % year)
    for link in links:
        if link.has_attr('href'):
            savepdf(link['href'], base_url, save_folder)
elif conference == "nips":
    base_url = 'https://papers.nips.cc/'
    links = getlinks(base_url)
    for l in links:
        # The index links to one listing page per year; match on year text.
        if len(re.findall(str(year), l.text)) > 0:
            turl = l['href']
            links_of_year = getlinks(base_url + turl)
            print(len(links_of_year))
            for l in links_of_year:
                # Each listing entry points at a paper page holding the PDF.
                links_of_a_paper = getlinks(base_url + l['href'])
                for link in links_of_a_paper:
                    if link.has_attr('href'):
                        savepdf(link['href'], base_url, save_folder)
else:
    # Message typo fixed ("supperted" -> "supported").
    print("not supported conference: %s" % conference)
| 27.402597 | 83 | 0.658768 | import httplib2
from bs4 import BeautifulSoup, SoupStrainer
import urllib.request, urllib.error
import os
import re
import sys
def get(url):
    """Fetch *url* via httplib2 and return the response body."""
    # ".cache" enables on-disk response caching; SSL certificate checks
    # are deliberately disabled for mirrors with broken certificates.
    http = httplib2.Http(".cache", disable_ssl_certificate_validation=True)
    status, response = http.request(url)
    return response
def getlinks(url):
    """Return all <a> elements parsed from the page at *url*."""
    return BeautifulSoup(get(url),"html.parser", parseOnlyThese=SoupStrainer('a'))
def pdfname(file_url,save_folder):
    """Map a file URL to its local destination path inside *save_folder*."""
    # rfind returns -1 for slash-less input, so slicing keeps the whole name.
    start_index = file_url.rfind("/")+1
    return save_folder+"/"+file_url[start_index:]
def savepdf(link,base_url,save_folder):
    """Download base_url+link into *save_folder* unless it already exists.

    Placeholder links ("#") and links not ending in "pdf" are ignored.
    """
    if link != "#" and link.endswith('pdf'):
        outfilename=pdfname(link,save_folder)
        # Skip files already downloaded (cheap resume support).
        if(not os.path.exists(outfilename)):
            pdf = urllib.request.urlopen(base_url+link).read()
            with open(outfilename, 'wb') as f:
                f.write(pdf)
# ---------------------------------------------------------------------------
# Script entry point.
# Usage: python download_paper.py [year] [conference]
#   conference is one of: cvpr (default), iccv, nips
# Downloads every paper PDF into ./<conference><year>/.
# ---------------------------------------------------------------------------
year = 2016
conference = "cvpr"
argc = len(sys.argv)
if argc > 1:
    year = int(sys.argv[1])
if argc > 2:
    conference = sys.argv[2]
save_folder = conference + str(year)
if not os.path.exists(save_folder):
    os.mkdir(save_folder)
if conference == "cvpr":
    base_url = 'https://openaccess.thecvf.com/'
    url = base_url + 'CVPR%d?day=all' % year
    links = getlinks(url)
    for link in links:
        # Bug fix: Tag.has_key() was removed in BeautifulSoup 4 (and this
        # file is Python 3); has_attr() is the supported attribute test.
        if link.has_attr('href'):
            savepdf(link['href'], base_url, save_folder)
elif conference == "iccv":
    base_url = 'https://openaccess.thecvf.com/'
    links = getlinks(base_url + 'ICCV%d' % year)
    for link in links:
        if link.has_attr('href'):
            savepdf(link['href'], base_url, save_folder)
elif conference == "nips":
    base_url = 'https://papers.nips.cc/'
    links = getlinks(base_url)
    for l in links:
        # The index links to one listing page per year; match on year text.
        if len(re.findall(str(year), l.text)) > 0:
            turl = l['href']
            links_of_year = getlinks(base_url + turl)
            print(len(links_of_year))
            for l in links_of_year:
                # Each listing entry points at a paper page holding the PDF.
                links_of_a_paper = getlinks(base_url + l['href'])
                for link in links_of_a_paper:
                    if link.has_attr('href'):
                        savepdf(link['href'], base_url, save_folder)
else:
    # Message typo fixed ("supperted" -> "supported").
    print("not supported conference: %s" % conference)
| true | true |
f731507f3bd50184b1ccba6b19d2e6c5da0c828a | 2,096 | py | Python | 2018/picoCTF_2018/Cryptography/SpyFi/manual_soln_for_spyfi.py | solomonbstoner/solomonbston3r-ctf-diary | 2eb5439b157ca0c97db313c9762c6b5d8a714a85 | [
"Unlicense"
] | 14 | 2018-04-01T00:52:42.000Z | 2020-07-11T06:17:49.000Z | 2018/picoCTF_2018/Cryptography/SpyFi/manual_soln_for_spyfi.py | solomonbstoner/solomonbston3r-ctf-diary | 2eb5439b157ca0c97db313c9762c6b5d8a714a85 | [
"Unlicense"
] | 5 | 2018-04-03T00:40:58.000Z | 2021-06-02T13:37:43.000Z | 2018/picoCTF_2018/Cryptography/SpyFi/manual_soln_for_spyfi.py | solomonbstoner/solomonbston3r-ctf-diary | 2eb5439b157ca0c97db313c9762c6b5d8a714a85 | [
"Unlicense"
] | null | null | null | # This exploit script is my first. I have to manually change the paddings before and after the input to get the desired chosen plaintext. I created it to understand how desired chosen plaintext attack works. See automatic_soln_for_spyfi.py for the automatic solution.
from pwn import *
padding_before = "A" * 11
padding_after = "A" * 11
cipher = ''
for char_to_test in range(33,127):
p = remote('2018shell2.picoctf.com', 34490)
p.clean()
#context.log_level = 'DEBUG'
p.recvuntil('report:')
str_to_test = "c00l3$t_5168610" + chr(char_to_test)
bruteforce_str = padding_before + str_to_test + padding_after
p.sendline(bruteforce_str)
cipher = p.recvline()[1:]
p.close()
print "Hex of encrypted input : %s"%(cipher[32*4:32*5])
print "Hex of actual message : %s"%(cipher[32*9:32*10])
print "%d - %s"%(char_to_test,str(cipher[32*4:32*5] == cipher[32*9:32*10]))
#for i in range(0, len(cipher), 32):
# print "%s"%(cipher[i:i+32])
if cipher[32*4:32*5] == cipher[32*9:32*10]:
print chr(char_to_test)
break
'''
# Example of output:
121 - False
[+] Opening connection to 2018shell2.picoctf.com on port 34490: Done
[*] Closed connection to 2018shell2.picoctf.com port 34490
Hex of encrypted input : 4a98d4a9994ef48bf16cc333f9b06335
Hex of actual message : a845efa04ec8c99b52e6233f9da3d597
122 - False
[+] Opening connection to 2018shell2.picoctf.com on port 34490: Done
[*] Closed connection to 2018shell2.picoctf.com port 34490
Hex of encrypted input : 742750ba80677cab5621c2604da75c2d
Hex of actual message : a845efa04ec8c99b52e6233f9da3d597
123 - False
[+] Opening connection to 2018shell2.picoctf.com on port 34490: Done
[*] Closed connection to 2018shell2.picoctf.com port 34490
Hex of encrypted input : dfedd6a1698d98859c94fe3acfa96635
Hex of actual message : a845efa04ec8c99b52e6233f9da3d597
124 - False
[+] Opening connection to 2018shell2.picoctf.com on port 34490: Done
[*] Closed connection to 2018shell2.picoctf.com port 34490
Hex of encrypted input : a845efa04ec8c99b52e6233f9da3d597
Hex of actual message : a845efa04ec8c99b52e6233f9da3d597
125 - True
}
'''
| 30.823529 | 267 | 0.756202 |
# NOTE: Python 2 exploit script (print statements); depends on pwntools.
from pwn import *
# AES-ECB chosen-plaintext attack: paddings align the guessed secret on a
# 16-byte block boundary for ciphertext-block comparison.
padding_before = "A" * 11
padding_after = "A" * 11
cipher = ''
# Brute-force the next unknown flag character over printable ASCII 33..126.
for char_to_test in range(33,127):
	p = remote('2018shell2.picoctf.com', 34490)
	p.clean()
	p.recvuntil('report:')
	# Known flag prefix plus the current guess character.
	str_to_test = "c00l3$t_5168610" + chr(char_to_test)
	bruteforce_str = padding_before + str_to_test + padding_after
	p.sendline(bruteforce_str)
	cipher = p.recvline()[1:]
	p.close()
	# Block 4 (hex offset 32*4) holds the encrypted guess; block 9 holds
	# the block that encrypts the real secret.
	print "Hex of encrypted input : %s"%(cipher[32*4:32*5])
	print "Hex of actual message : %s"%(cipher[32*9:32*10])
	print "%d - %s"%(char_to_test,str(cipher[32*4:32*5] == cipher[32*9:32*10]))
	# Identical ciphertext blocks mean the guessed character is correct.
	if cipher[32*4:32*5] == cipher[32*9:32*10]:
		print chr(char_to_test)
		break
'''
# Example of output:
121 - False
[+] Opening connection to 2018shell2.picoctf.com on port 34490: Done
[*] Closed connection to 2018shell2.picoctf.com port 34490
Hex of encrypted input : 4a98d4a9994ef48bf16cc333f9b06335
Hex of actual message : a845efa04ec8c99b52e6233f9da3d597
122 - False
[+] Opening connection to 2018shell2.picoctf.com on port 34490: Done
[*] Closed connection to 2018shell2.picoctf.com port 34490
Hex of encrypted input : 742750ba80677cab5621c2604da75c2d
Hex of actual message : a845efa04ec8c99b52e6233f9da3d597
123 - False
[+] Opening connection to 2018shell2.picoctf.com on port 34490: Done
[*] Closed connection to 2018shell2.picoctf.com port 34490
Hex of encrypted input : dfedd6a1698d98859c94fe3acfa96635
Hex of actual message : a845efa04ec8c99b52e6233f9da3d597
124 - False
[+] Opening connection to 2018shell2.picoctf.com on port 34490: Done
[*] Closed connection to 2018shell2.picoctf.com port 34490
Hex of encrypted input : a845efa04ec8c99b52e6233f9da3d597
Hex of actual message : a845efa04ec8c99b52e6233f9da3d597
125 - True
}
'''
| false | true |
f73150e76382b56bfc3f89148efd010e1fe93f98 | 1,332 | py | Python | utils/logquant_v1.py | listato/Logarithmic-Quantization-of-Parameters-in-Neural-Networks | dbc6a48ab5e0bf4361be459a45598523f2344371 | [
"MIT"
] | 1 | 2022-02-04T10:39:54.000Z | 2022-02-04T10:39:54.000Z | utils/logquant_v1.py | listato/Logarithmic-Quantization-of-Parameters-in-Neural-Networks | dbc6a48ab5e0bf4361be459a45598523f2344371 | [
"MIT"
] | null | null | null | utils/logquant_v1.py | listato/Logarithmic-Quantization-of-Parameters-in-Neural-Networks | dbc6a48ab5e0bf4361be459a45598523f2344371 | [
"MIT"
] | null | null | null | """
Author: CAI JINGYONG @ BeatCraft, Inc & Tokyo University of Agriculture and Technology
placeholder
input: numpy array
output: numpy array
"""
import numpy
class LogQuant:
    """Logarithmic (power-of-two) quantizer for a layer's parameters.

    Values are quantized to ``clip(round(log2(|w|)))`` with the exponent
    window ``(4 - 2**bitwidth, 4)``; the element-wise sign is kept
    separately so de-quantization can restore it.
    """
    def __init__(self, layer, bitwidth):
        # layer: numpy array of parameters; bitwidth: exponent bit width.
        self.layer_data = layer
        self.width = bitwidth
        self.maxima = numpy.amax(layer)
        self.minima = numpy.amin(layer)
        # Full-scale range; kept for reference (the paper's FSR-based clip
        # bound, fsr - 2**width, is not used by this implementation).
        self.fsr = self.maxima - self.minima
        self.sign = numpy.sign(layer)
    def __clip(self, x):
        """Clip one exponent to the representable window.

        numpy.log2(0) yields -inf, which falls into the lower clip here.
        """
        lower = 4 - (2 ** self.width)  # renamed from 'min' (shadowed builtin)
        if x <= lower:
            return 0
        elif x >= 4:
            return 4 - 1
        else:
            return x
    def __round(self, x):
        """Round an exponent using the sqrt(2) midpoint.

        In the log domain the geometric midpoint between 2**n and 2**(n+1)
        is sqrt(2)*2**n, i.e. a fractional part of sqrt(2)-1.
        """
        bridge = numpy.sqrt(2) - 1
        decimalpart, intpart = numpy.modf(x)
        if decimalpart >= bridge:
            return numpy.ceil(x)
        else:
            return numpy.floor(x)
    @property
    def log_quantize(self):
        """Quantized exponents of ``layer_data`` as an int8 array."""
        round_ = numpy.vectorize(self.__round)  # renamed: don't shadow round
        clip_ = numpy.vectorize(self.__clip)    # renamed: don't shadow clip
        return numpy.array(clip_(round_(numpy.log2(abs(self.layer_data)))),
                           dtype=numpy.int8)
    @property
    def de_quantize(self):
        """Reconstruct approximate values as ``sign * 2**exponent``.

        Bug fix: the original read ``self.log_quantized``, an attribute
        that does not exist (the property is ``log_quantize``), so this
        always raised AttributeError.
        """
        x = numpy.power(2.0, self.log_quantize)
        return x * self.sign
| 26.64 | 90 | 0.583333 | import numpy
class LogQuant:
    """Logarithmic (power-of-two) quantizer for a layer's parameters.

    Values are quantized to ``clip(round(log2(|w|)))`` with the exponent
    window ``(4 - 2**bitwidth, 4)``; the element-wise sign is kept
    separately so de-quantization can restore it.
    """
    def __init__(self, layer, bitwidth):
        # layer: numpy array of parameters; bitwidth: exponent bit width.
        self.layer_data = layer
        self.width = bitwidth
        self.maxima = numpy.amax(layer)
        self.minima = numpy.amin(layer)
        self.fsr = self.maxima - self.minima  # full-scale range (reference)
        self.sign = numpy.sign(layer)
    def __clip(self, x):
        """Clip one exponent to the representable window.

        numpy.log2(0) yields -inf, which falls into the lower clip here.
        """
        lower = 4 - (2 ** self.width)  # renamed from 'min' (shadowed builtin)
        if x <= lower:
            return 0
        elif x >= 4:
            return 4 - 1
        else:
            return x
    def __round(self, x):
        """Round an exponent using the sqrt(2) geometric midpoint."""
        bridge = numpy.sqrt(2) - 1
        decimalpart, intpart = numpy.modf(x)
        if decimalpart >= bridge:
            return numpy.ceil(x)
        else:
            return numpy.floor(x)
    @property
    def log_quantize(self):
        """Quantized exponents of ``layer_data`` as an int8 array."""
        round_ = numpy.vectorize(self.__round)  # renamed: don't shadow round
        clip_ = numpy.vectorize(self.__clip)    # renamed: don't shadow clip
        return numpy.array(clip_(round_(numpy.log2(abs(self.layer_data)))),
                           dtype=numpy.int8)
    @property
    def de_quantize(self):
        """Reconstruct approximate values as ``sign * 2**exponent``.

        Bug fix: the original read ``self.log_quantized``, an attribute
        that does not exist (the property is ``log_quantize``), so this
        always raised AttributeError.
        """
        x = numpy.power(2.0, self.log_quantize)
        return x * self.sign
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.