hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3961e9e30bcaa86704d30f24c9675d41c3d1bb | 362 | py | Python | omnik/users/apps.py | pygabo/omnik | 579b20671515d8a38b56df8c5bc837bd201ec7b0 | [
"MIT"
] | null | null | null | omnik/users/apps.py | pygabo/omnik | 579b20671515d8a38b56df8c5bc837bd201ec7b0 | [
"MIT"
] | null | null | null | omnik/users/apps.py | pygabo/omnik | 579b20671515d8a38b56df8c5bc837bd201ec7b0 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class UsersConfig(AppConfig):
    """Django application configuration for the users app."""

    # Dotted path of the app package as Django should register it.
    name = "omnik.users"
    # Human-readable name shown in the Django admin.
    verbose_name = "Users"

    def ready(self):
        """Override this to put in:

        Users system checks
        Users signal registration
        """
        # NOTE(review): the app is named "omnik.users" but this imports
        # "users.signals" — presumably it should be "omnik.users.signals";
        # confirm against the project layout.  The ImportError guard makes
        # signal registration best-effort (a no-op when the module is absent).
        try:
            import users.signals  # noqa F401
        except ImportError:
            pass
| 21.294118 | 45 | 0.569061 | from django.apps import AppConfig
class UsersConfig(AppConfig):
    # App configuration: registers the package under "omnik.users" and
    # labels it "Users" in the admin.
    name = "omnik.users"
    verbose_name = "Users"

    def ready(self):
        # Best-effort signal registration at app startup; silently skipped
        # when the signals module does not exist.
        # NOTE(review): import path "users.signals" does not match the app
        # name "omnik.users" — verify which package is intended.
        try:
            import users.signals
        except ImportError:
            pass
| true | true |
1c3961f1f78f462d00857e47927e65b6a6d8eba1 | 141,178 | py | Python | lib/sqlalchemy/sql/expression.py | obeattie/sqlalchemy | 376007fed7746d494dcb0166b22e512bfece02cd | [
"MIT"
] | 2 | 2016-05-09T09:17:35.000Z | 2016-08-03T16:30:16.000Z | lib/sqlalchemy/sql/expression.py | clones/sqlalchemy | c9f08aa78a48ba53dd221d3c5de54e5956ecf806 | [
"MIT"
] | null | null | null | lib/sqlalchemy/sql/expression.py | clones/sqlalchemy | c9f08aa78a48ba53dd221d3c5de54e5956ecf806 | [
"MIT"
] | null | null | null | # expression.py
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the base components of SQL expression trees.
All components are derived from a common base class
:class:`ClauseElement`. Common behaviors are organized
based on class hierarchies, in some cases via mixins.
All object construction from this package occurs via functions which
in some cases will construct composite :class:`ClauseElement` structures
together, and in other cases simply return a single :class:`ClauseElement`
constructed directly. The function interface affords a more "DSL-ish"
feel to constructing SQL expressions and also allows future class
reorganizations.
Even though classes are not constructed directly from the outside,
most classes which have additional public methods are considered to be
public (i.e. have no leading underscore). Other classes which are
"semi-public" are marked with a single leading underscore; these
classes usually have few or no public methods and are less guaranteed
to stay the same in future releases.
"""
import itertools, re
from operator import attrgetter
from sqlalchemy import util, exc #, types as sqltypes
from sqlalchemy.sql import operators
from sqlalchemy.sql.visitors import Visitable, cloned_traverse
import operator
functions, schema, sql_util, sqltypes = None, None, None, None
DefaultDialect, ClauseAdapter, Annotated = None, None, None
__all__ = [
'Alias', 'ClauseElement',
'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join',
'Select', 'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc',
'between', 'bindparam', 'case', 'cast', 'column', 'delete',
'desc', 'distinct', 'except_', 'except_all', 'exists', 'extract', 'func',
'modifier', 'collate',
'insert', 'intersect', 'intersect_all', 'join', 'label', 'literal',
'literal_column', 'not_', 'null', 'or_', 'outparam', 'outerjoin', 'select',
'subquery', 'table', 'text', 'tuple_', 'union', 'union_all', 'update', ]
PARSE_AUTOCOMMIT = util._symbol('PARSE_AUTOCOMMIT')
def desc(column):
    """Produce a descending ``ORDER BY`` element for *column*.

    e.g.::

      order_by = [desc(table1.mycol)]
    """
    return _UnaryExpression(column, modifier=operators.desc_op)
def asc(column):
    """Produce an ascending ``ORDER BY`` element for *column*.

    e.g.::

      order_by = [asc(table1.mycol)]
    """
    return _UnaryExpression(column, modifier=operators.asc_op)
def outerjoin(left, right, onclause=None):
    """Construct a LEFT ``OUTER JOIN`` between *left* and *right*.

    Returns a :class:`Join` instance.  The same behavior is available
    from the :func:`outerjoin()` method on any :class:`FromClause`.

    When *onclause* is omitted, the ``ON`` criterion is derived from the
    foreign key relationships between the two sides.  Chain further
    joins via :func:`join()` / :func:`outerjoin()` on the result.
    """
    return Join(left, right, onclause, isouter=True)
def join(left, right, onclause=None, isouter=False):
    """Construct an inner ``JOIN`` between *left* and *right*.

    Returns a :class:`Join` instance.  The same behavior is available
    from the :func:`join()` method on any :class:`FromClause`.

    When *onclause* is omitted, the ``ON`` criterion is derived from the
    foreign key relationships between the two sides; set ``isouter=True``
    for an outer join.  Chain further joins via :func:`join()` /
    :func:`outerjoin()` on the result.
    """
    return Join(left, right, onclause, isouter)
def select(columns=None, whereclause=None, from_obj=None, **kwargs):
    """Return a ``SELECT`` clause element, as a :class:`Select` instance.

    Similar functionality is also available via the :func:`select()`
    method on any :class:`FromClause`.  All arguments which accept
    :class:`ClauseElement` arguments also accept string arguments, which
    are converted into :func:`text()` or :func:`literal_column()`
    constructs as appropriate.

    :param columns: a list of :class:`ClauseElement` objects (typically
      :class:`ColumnElement` subclasses) forming the columns clause.
      For members which are :class:`Selectable` instances (such as a
      :class:`~sqlalchemy.schema.Table`), each contained
      :class:`ColumnElement` is added individually.

    :param whereclause: a :class:`ClauseElement` expression forming the
      ``WHERE`` clause.

    :param from_obj: a list of :class:`ClauseElement` objects added to the
      ``FROM`` clause.  "From" objects are located automatically within
      the columns and whereclause; use this parameter only for objects
      that are not otherwise locatable, or :class:`Join` objects whose
      presence should supersede already-located tables.  Defaults to an
      empty list.

    :param \**kwargs: additional options passed to :class:`Select`:
      ``autocommit`` (deprecated; use
      ``.execution_options(autocommit=...)``), ``prefixes`` (strings or
      clauses rendered after the SELECT keyword), ``distinct``,
      ``use_labels`` (label each column ``<tablename>_<column>``),
      ``for_update`` (``True``, or dialect values such as ``"read"`` /
      ``"nowait"``), ``correlate`` (default ``True``: correlate contained
      froms against an enclosing select), ``group_by``, ``having``,
      ``order_by``, ``limit``, ``offset``, and ``bind`` (an ``Engine`` or
      ``Connection`` to bind the result to).
    """
    # Fix for the shared-mutable-default pitfall: the prior signature used
    # ``from_obj=[]``, so every call sharing the default saw the very same
    # list object.  ``None`` is translated to a fresh list per call;
    # callers passing an explicit sequence are unaffected.
    if from_obj is None:
        from_obj = []
    return Select(columns, whereclause=whereclause, from_obj=from_obj,
                  **kwargs)
def subquery(alias, *args, **kwargs):
    """Build a :class:`Select` from ``*args``/``**kwargs`` (as accepted by
    :func:`select`) and wrap it in an :class:`Alias` named *alias*.
    """
    stmt = Select(*args, **kwargs)
    return stmt.alias(alias)
def insert(table, values=None, inline=False, **kwargs):
    """Construct an :class:`Insert` clause element for *table*.

    Similar functionality is available via the :func:`insert()` method on
    :class:`~sqlalchemy.schema.Table`.

    :param table: target table of the ``INSERT``.
    :param values: optional dictionary of column specifications; when
      ``None`` they are derived from bind parameters at compile time, or
      from the full column list as a last resort.  The
      :meth:`~Insert.values()` generative method is an alternative.
    :param prefixes: list of modifier keywords rendered between ``INSERT``
      and ``INTO`` (or use :meth:`~Insert.prefix_with`).
    :param inline: when ``True``, SQL defaults compile inline rather than
      being pre-executed.

    Compile-time bind parameters override entries in *values* on a
    per-key basis.  Keys of *values* may be
    :class:`~sqlalchemy.schema.Column` objects or their string names, and
    each value may be a literal, a Column, or a ``SELECT`` (which will be
    correlated against this ``INSERT`` if it references its table).
    """
    return Insert(table, values, inline=inline, **kwargs)
def update(table, whereclause=None, values=None, inline=False, **kwargs):
    """Construct an :class:`Update` clause element for *table*.

    Similar functionality is available via the :func:`update()` method on
    :class:`~sqlalchemy.schema.Table`.

    :param table: target table of the ``UPDATE``.
    :param whereclause: optional :class:`ClauseElement` forming the
      ``WHERE`` condition (or use :meth:`~Update.where()`).
    :param values: optional dictionary of ``SET`` conditions; when
      ``None`` they are derived from bind parameters at compile time, or
      from the full column list as a last resort.  The
      :meth:`~Update.values()` generative method is an alternative.
    :param inline: when ``True``, SQL defaults compile inline rather than
      being pre-executed.

    Compile-time bind parameters override entries in *values* on a
    per-key basis.  Keys of *values* may be
    :class:`~sqlalchemy.schema.Column` objects or their string names, and
    each value may be a literal, a Column, or a ``SELECT`` (which will be
    correlated against this ``UPDATE`` if it references its table).
    """
    return Update(table,
                  whereclause=whereclause,
                  values=values,
                  inline=inline,
                  **kwargs)
def delete(table, whereclause=None, **kwargs):
    """Construct a :class:`Delete` clause element for *table*.

    Similar functionality is available via the :func:`delete()` method on
    :class:`~sqlalchemy.schema.Table`.

    :param table: target table of the ``DELETE``.
    :param whereclause: optional :class:`ClauseElement` forming the
      ``WHERE`` condition (or use :meth:`~Delete.where()`).
    """
    return Delete(table, whereclause, **kwargs)
def and_(*clauses):
    """Combine the given clauses with the SQL ``AND`` operator.

    A single clause is returned unchanged.  The overloaded ``&`` operator
    on :class:`_CompareMixin` subclasses produces the same result.
    """
    if len(clauses) == 1:
        return clauses[0]
    return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
    """Combine the given clauses with the SQL ``OR`` operator.

    A single clause is returned unchanged.  The overloaded ``|`` operator
    on :class:`_CompareMixin` subclasses produces the same result.
    """
    if len(clauses) == 1:
        return clauses[0]
    return BooleanClauseList(operator=operators.or_, *clauses)
def not_(clause):
    """Negate the given clause: ``NOT(clause)``.

    The overloaded ``~`` operator on :class:`_CompareMixin` subclasses
    produces the same result.  Plain values are first coerced to bind
    parameters.
    """
    return operators.inv(_literal_as_binds(clause))
def distinct(expr):
    """Wrap *expr* in a ``DISTINCT`` unary clause, preserving its type."""
    coerced = _literal_as_binds(expr)
    return _UnaryExpression(coerced, operator=operators.distinct_op,
                            type_=coerced.type)
def between(ctest, cleft, cright):
    """Build a ``BETWEEN`` predicate: ``ctest BETWEEN cleft AND cright``.

    Equivalent to calling the :func:`between()` method available on all
    :class:`_CompareMixin` subclasses; *ctest* is coerced to a clause
    element first.
    """
    return _literal_as_binds(ctest).between(cleft, cright)
def case(whens, value=None, else_=None):
    """Build a SQL ``CASE`` expression.

    :param whens: a sequence of pairs (or a dict) translated into
      ``WHEN ... THEN ...`` clauses.
    :param value: for the "simple" form, the expression tested against
      each WHEN: ``CASE <value> WHEN ...``.
    :param else\_: the ``ELSE`` result.

    Strings used for THEN/ELSE become bound values; use
    ``literal_column(<string>)`` or ``text(<string>)`` for textual SQL.
    Literal strings in the WHEN position are only accepted when *value*
    is given (``CASE table.somecol WHEN "x" THEN "y"``); otherwise wrap
    them with ``text()`` or ``literal()``.

    Examples::

      case([(orderline.c.qty > 100, item.c.specialprice),
            (orderline.c.qty > 10, item.c.bulkprice)
          ], else_=item.c.regularprice)

      case(value=emp.c.type, whens={
              'engineer': emp.c.salary * 1.1,
              'manager':  emp.c.salary * 3,
          })

    Using :func:`literal_column()` with an explicit type, for backends
    without bind-parameter support in THEN::

      case([(orderline.c.qty > 100, literal_column("'greaterthan100'", String)),
            (orderline.c.qty > 10, literal_column("'greaterthan10'", String))
          ], else_=literal_column("'lethan10'", String))
    """
    return _Case(whens, value=value, else_=else_)
def cast(clause, totype, **kwargs):
    """Build a ``CAST`` expression: ``CAST(clause AS totype)``.

    *totype* is a :class:`~sqlalchemy.types.TypeEngine` subclass or
    instance, e.g.::

      cast(table.c.unit_price * table.c.qty, Numeric(10,4))

    or::

      cast(table.c.timestamp, DATE)
    """
    return _Cast(clause, totype, **kwargs)
def extract(field, expr):
    """Build the SQL expression ``extract(field FROM expr)``."""
    return _Extract(field, expr)
def collate(expression, collation):
    """Build the SQL expression ``expression COLLATE collation``."""
    lhs = _literal_as_binds(expression)
    return _BinaryExpression(lhs,
                             _literal_as_text(collation),
                             operators.collate,
                             type_=lhs.type)
def exists(*args, **kwargs):
    """Build an ``EXISTS`` clause applied to a :class:`Select`.

    Calling styles::

      # wrap an existing select()
      s = select([table.c.col1]).where(table.c.col2==5)
      s = exists(s)

      # build the select() in one call
      exists(['*'], **select_arguments).where(criterion)

      # columns argument optional; defaults to "EXISTS (SELECT *)"
      exists().where(table.c.col2==5)
    """
    return _Exists(*args, **kwargs)
def union(*selects, **kwargs):
    """Combine the given :class:`Select` objects with SQL ``UNION``.

    Returns a :class:`CompoundSelect`.  Keyword arguments are the same as
    those of :func:`select`; a ``union()`` method is also available on
    all :class:`FromClause` subclasses.
    """
    return _compound_select('UNION', *selects, **kwargs)
def union_all(*selects, **kwargs):
    """Combine the given :class:`Select` objects with SQL ``UNION ALL``.

    Returns a :class:`CompoundSelect`.  Keyword arguments are the same as
    those of :func:`select`; a ``union_all()`` method is also available
    on all :class:`FromClause` subclasses.
    """
    return _compound_select('UNION ALL', *selects, **kwargs)
def except_(*selects, **kwargs):
    """Combine the given :class:`Select` objects with SQL ``EXCEPT``.

    Returns a :class:`CompoundSelect`.  Keyword arguments are the same as
    those of :func:`select`.
    """
    return _compound_select('EXCEPT', *selects, **kwargs)
def except_all(*selects, **kwargs):
    """Combine the given :class:`Select` objects with SQL ``EXCEPT ALL``.

    Returns a :class:`CompoundSelect`.  Keyword arguments are the same as
    those of :func:`select`.
    """
    return _compound_select('EXCEPT ALL', *selects, **kwargs)
def intersect(*selects, **kwargs):
    """Combine the given :class:`Select` objects with SQL ``INTERSECT``.

    Returns a :class:`CompoundSelect`.  Keyword arguments are the same as
    those of :func:`select`.
    """
    return _compound_select('INTERSECT', *selects, **kwargs)
def intersect_all(*selects, **kwargs):
    """Combine the given :class:`Select` objects with SQL ``INTERSECT ALL``.

    Returns a :class:`CompoundSelect`.  Keyword arguments are the same as
    those of :func:`select`.
    """
    return _compound_select('INTERSECT ALL', *selects, **kwargs)
def alias(selectable, alias=None):
    """Wrap *selectable* in an :class:`Alias`.

    An :class:`Alias` represents any :class:`FromClause` under an
    alternate SQL name, typically rendered with ``AS``
    (``SELECT * FROM table AS aliasname``).  The same behavior is
    available from the :func:`alias()` method on every
    :class:`FromClause` subclass.

    :param selectable: any :class:`FromClause` subclass (table, select
      statement, etc.).
    :param alias: the alias name; a random name is generated when
      ``None``.
    """
    return Alias(selectable, alias=alias)
def literal(value, type_=None):
    """Force *value* into a literal clause, bound to a bind parameter.

    Literal clauses are created automatically whenever plain Python
    objects (strings, ints, dates, ...) take part in a comparison with a
    :class:`_CompareMixin` subclass such as a
    :class:`~sqlalchemy.schema.Column`.  Use this function to build the
    :class:`_BindParamClause` explicitly.

    :param value: any Python object supported by the underlying DB-API,
      or translatable via *type_*.
    :param type\_: optional :class:`~sqlalchemy.types.TypeEngine`
      providing bind-parameter translation.
    """
    return _BindParamClause(None, value, type_=type_, unique=True)
def tuple_(*expr):
    """Build a SQL tuple, mainly for composite ``IN`` constructs::

      tuple_(table.c.col1, table.c.col2).in_(
          [(1, 2), (5, 12), (10, 19)]
      )
    """
    return _Tuple(*expr)
def label(name, obj):
    """Attach a :class:`_Label` named *name* to :class:`ColumnElement`
    *obj*.

    A label renames an element in the columns clause of a ``SELECT``,
    typically via the SQL ``AS`` keyword; the :func:`label()` method on
    :class:`ColumnElement` offers the same functionality more
    conveniently.
    """
    return _Label(name, obj)
def column(text, type_=None):
    """Build a textual column clause for the columns list of a ``SELECT``.

    Returns a :class:`ColumnClause` — the "syntactical" counterpart of a
    schema-level :class:`~sqlalchemy.schema.Column`.

    :param text: the column name.  Normal quoting rules apply; for
      textual constructs that must not be quoted, use
      :func:`literal_column` instead.
    :param type\_: optional :class:`~sqlalchemy.types.TypeEngine`
      providing result-set translation for this column.
    """
    return ColumnClause(text, type_=type_)
def literal_column(text, type_=None):
    """Build an unquoted textual column expression for a ``SELECT``.

    The result supports comparison, math and string operations like any
    other column object; *type_* determines expression semantics (e.g.
    whether ``+`` means concatenation or addition).

    :param text: any SQL expression text.  Quoting rules are NOT applied;
      use :func:`column` for a name that should be quoted.
    :param type\_: optional :class:`~sqlalchemy.types.TypeEngine`
      providing result-set translation and expression semantics;
      NullType when omitted.
    """
    return ColumnClause(text, type_=type_, is_literal=True)
def table(name, *columns):
    """Build a :class:`TableClause` — the primitive form of the
    schema-level :class:`~sqlalchemy.schema.Table`, which subclasses it.
    """
    return TableClause(name, *columns)
def bindparam(key, value=None, type_=None, unique=False, required=False):
    """Build a bind parameter clause keyed by *key*.

    :param value: optional default value ("value-based bindparam").
    :param type\_: a sqlalchemy.types.TypeEngine enabling type-specific
      bind processing.
    :param unique: when True, same-named params get uniquely generated
      keys; mostly useful with value-based params.
    :param required: a value must be supplied at execution time.
    """
    if isinstance(key, ColumnClause):
        # a column was passed directly: take its name and type
        return _BindParamClause(key.name, value, type_=key.type,
                                unique=unique, required=required)
    return _BindParamClause(key, value, type_=type_,
                            unique=unique, required=required)
def outparam(key, type_=None):
    """Build an 'OUT' parameter for stored-procedure style functions, on
    databases which support them.

    Usable like a regular function parameter; the output value is later
    read from :class:`~sqlalchemy.engine.ResultProxy` via its
    ``out_parameters`` dictionary attribute.
    """
    return _BindParamClause(key, None, type_=type_,
                            unique=False, isoutparam=True)
def text(text, bind=None, *args, **kwargs):
    """Wrap literal SQL text for use inside a query.

    Plain strings passed to :func:`select()`, :func:`update()`,
    :func:`insert()` or :func:`delete()` usually become text objects
    automatically; use this function for textual clauses outside other
    :class:`ClauseElement` objects, or wherever plain text is wanted.

    :param text: the SQL text.  Use ``:<param>`` for bind parameters;
      they compile to the engine-specific format.
    :param bind: optional connection or engine for this text query.

    Keyword options:

    autocommit=True
      Deprecated.  Use ``.execution_options(autocommit=<True|False>)``.

    bindparams
      list of :func:`bindparam()` instances declaring types and/or
      initial values for the statement's bind parameters; keynames must
      match those in the text.  Types drive bind-value pre-processing.

    typemap
      mapping of result-column names (from the textual ``SELECT``
      clause) to type objects, used for post-processing of result rows.
    """
    return _TextClause(text, bind=bind, *args, **kwargs)
def null():
    """Build a :class:`_Null`, which renders as ``NULL`` in SQL."""
    return _Null()
class _FunctionGenerator(object):
    """Generate :class:`Function` objects based on getattr calls.

    Attribute chains accumulate package names, e.g.
    ``func.pkg.count`` builds up ``['pkg', 'count']``; calling the
    result constructs the :class:`Function`.
    """

    def __init__(self, **opts):
        # NOTE: double-underscore attributes are name-mangled to
        # _FunctionGenerator__names, keeping them out of __getattr__'s way.
        self.__names = []
        self.opts = opts

    def __getattr__(self, name):
        # passthru __ attributes; fixes pydoc
        if name.startswith('__'):
            try:
                return self.__dict__[name]
            except KeyError:
                raise AttributeError(name)
        elif name.endswith('_'):
            # trailing underscore escapes Python keywords, e.g. func.if_
            name = name[0:-1]
        # return a new generator extending the accumulated name chain;
        # self is never mutated, so partial chains can be reused.
        f = _FunctionGenerator(**self.opts)
        f.__names = list(self.__names) + [name]
        return f

    def __call__(self, *c, **kwargs):
        # keyword args at call time override the options given at
        # construction
        o = self.opts.copy()
        o.update(kwargs)
        if len(self.__names) == 1:
            # single-name functions may resolve to a known "generic"
            # function class; the functions module is imported lazily to
            # avoid a circular import at module load.
            global functions
            if functions is None:
                from sqlalchemy.sql import functions
            func = getattr(functions, self.__names[-1].lower(), None)
            if func is not None:
                return func(*c, **o)
        # fall back to a generic Function; leading names become
        # package qualifiers.
        return Function(
            self.__names[-1], packagenames=self.__names[0:-1], *c, **o)
# "func" global - i.e. func.count()
# Public generator of SQL function expressions via attribute access.
func = _FunctionGenerator()

# "modifier" global - i.e. modifier.distinct
# Same mechanism with grouping parentheses disabled.
# TODO: use UnaryExpression for this instead ?
modifier = _FunctionGenerator(group=False)
class _generated_label(unicode):
    """A unicode subclass used to identify dynamically generated names."""
    # Marker type only: carries no extra behavior.  Subclasses the
    # Python 2 ``unicode`` built-in so generated labels remain usable
    # anywhere a plain string is expected.
def _escape_for_generated(x):
    """Escape ``%`` signs in *x* for string interpolation, unless *x* is
    already a generated label (which is passed through untouched)."""
    if isinstance(x, _generated_label):
        return x
    return x.replace('%', '%%')
def _clone(element):
return element._clone()
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
    """return the intersection of sets a and b, counting
    any overlap between 'cloned' predecessors.

    The returned set is in terms of the entities present within 'a'.

    """
    overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(elem for elem in a
               if overlap.intersection(elem._cloned_set))
def _compound_select(keyword, *selects, **kwargs):
    """Shared constructor behind union/intersect/except variants."""
    return CompoundSelect(keyword, *selects, **kwargs)
def _is_literal(element):
    """True when *element* is a plain Python value: neither a
    :class:`Visitable` nor adaptable via ``__clause_element__``."""
    if isinstance(element, Visitable):
        return False
    return not hasattr(element, '__clause_element__')
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _column_as_key(element):
    """Resolve a column-ish argument to its string key.

    Strings pass through; objects adaptable via ``__clause_element__``
    are resolved first, then their ``.key`` is returned.
    """
    if isinstance(element, basestring):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    return element.key
def _literal_as_text(element):
    """Coerce *element* to a ClauseElement, wrapping plain values in a
    :class:`_TextClause`."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    return _TextClause(unicode(element))
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_column(element):
    """Coerce *element* to a ClauseElement, wrapping plain values via
    :func:`literal_column`."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    return literal_column(str(element))
def _literal_as_binds(element, name=None, type_=None):
    """Coerce *element* to a ClauseElement, wrapping plain values as bind
    parameters (``None`` becomes SQL NULL)."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    if element is None:
        return null()
    return _BindParamClause(name, element, type_=type_, unique=True)
def _type_from_args(args):
    """Return the first non-NullType ``.type`` found among *args*.

    When every argument's type is NullType, the NullType class itself is
    returned (matching the original for/else behavior).
    """
    for arg in args:
        if not isinstance(arg.type, sqltypes.NullType):
            return arg.type
    return sqltypes.NullType
def _no_literals(element):
    """Coerce via ``__clause_element__`` where possible, but reject plain
    Python values with an :class:`~sqlalchemy.exc.ArgumentError`."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    if isinstance(element, Visitable):
        return element
    raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' function "
            "to indicate a SQL expression literal, or 'literal()' to indicate a bound value." % element)
def _corresponding_column_or_error(fromclause, column, require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),fromclause.description)
)
return c
@util.decorator
def _generative(fn, *args, **kw):
    """Mark a method as generative."""
    # operate on a generated copy so the original statement is untouched
    generated = args[0]._generate()
    fn(generated, *args[1:], **kw)
    return generated
def is_column(col):
    """True if ``col`` is an instance of :class:`ColumnElement`."""
    # public predicate; isinstance so that all ColumnElement subclasses qualify
    return isinstance(col, ColumnElement)
class ClauseElement(Visitable):
    """Base class for elements of a programmatically constructed SQL
    expression.
    """
    __visit_name__ = 'clause'
    # default (shared, treated as immutable) annotations; _annotate() wraps
    _annotations = {}
    # True for statement-like elements that may be passed to execute()
    supports_execution = False
    # FROM objects this element contributes to an enclosing statement
    _from_objects = []
    # Engine/Connection explicitly associated with this element, if any
    _bind = None
    def _clone(self):
        """Create a shallow copy of this ClauseElement.
        This method may be used by a generative API. Its also used as
        part of the "deep" copy afforded by a traversal that combines
        the _copy_internals() method.
        """
        c = self.__class__.__new__(self.__class__)
        c.__dict__ = self.__dict__.copy()
        # drop the memoized _cloned_set so the copy recomputes its ancestry
        c.__dict__.pop('_cloned_set', None)
        # this is a marker that helps to "equate" clauses to each other
        # when a Select returns its list of FROM clauses. the cloning
        # process leaves around a lot of remnants of the previous clause
        # typically in the form of column expressions still attached to the
        # old table.
        c._is_clone_of = self
        return c
    @util.memoized_property
    def _cloned_set(self):
        """Return the set consisting all cloned ancestors of this
        ClauseElement.
        Includes this ClauseElement. This accessor tends to be used for
        FromClause objects to identify 'equivalent' FROM clauses, regardless
        of transformative operations.
        """
        s = util.column_set()
        f = self
        # follow the chain of _is_clone_of markers back to the original
        while f is not None:
            s.add(f)
            f = getattr(f, '_is_clone_of', None)
        return s
    def __getstate__(self):
        # exclude the clone marker from pickles; it would otherwise
        # serialize the whole chain of ancestor clauses
        d = self.__dict__.copy()
        d.pop('_is_clone_of', None)
        return d
    if util.jython:
        def __hash__(self):
            """Return a distinct hash code.
            ClauseElements may have special equality comparisons which
            makes us rely on them having unique hash codes for use in
            hash-based collections. Stock __hash__ doesn't guarantee
            unique values on platforms with moving GCs.
            """
            return id(self)
    def _annotate(self, values):
        """return a copy of this ClauseElement with the given annotations
        dictionary.
        """
        # Annotated is imported lazily to avoid a circular import with
        # sqlalchemy.sql.util
        global Annotated
        if Annotated is None:
            from sqlalchemy.sql.util import Annotated
        return Annotated(self, values)
    def _deannotate(self):
        """return a copy of this ClauseElement with an empty annotations
        dictionary.
        """
        return self._clone()
    def unique_params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elments replaced.
        Same functionality as ``params()``, except adds `unique=True`
        to affected bind parameters so that multiple statements can be
        used.
        """
        return self._params(True, optionaldict, kwargs)
    def params(self, *optionaldict, **kwargs):
        """Return a copy with :func:`bindparam()` elments replaced.
        Returns a copy of this ClauseElement with :func:`bindparam()`
        elements replaced with values taken from the given dictionary::
        >>> clause = column('x') + bindparam('foo')
        >>> print clause.compile().params
        {'foo':None}
        >>> print clause.params({'foo':7}).compile().params
        {'foo':7}
        """
        return self._params(False, optionaldict, kwargs)
    def _params(self, unique, optionaldict, kwargs):
        # merge an optional single positional dictionary into kwargs
        if len(optionaldict) == 1:
            kwargs.update(optionaldict[0])
        elif len(optionaldict) > 1:
            raise exc.ArgumentError(
                "params() takes zero or one positional dictionary argument")
        def visit_bindparam(bind):
            if bind.key in kwargs:
                bind.value = kwargs[bind.key]
            if unique:
                bind._convert_to_unique()
        # clone the expression tree, rewriting bind values along the way
        return cloned_traverse(self, {}, {'bindparam':visit_bindparam})
    def compare(self, other, **kw):
        """Compare this ClauseElement to the given ClauseElement.
        Subclasses should override the default behavior, which is a
        straight identity comparison.
        \**kw are arguments consumed by subclass compare() methods and
        may be used to modify the criteria for comparison.
        (see :class:`ColumnElement`)
        """
        return self is other
    def _copy_internals(self, clone=_clone):
        """Reassign internal elements to be clones of themselves.
        Called during a copy-and-traverse operation on newly
        shallow-copied elements to create a deep copy.
        """
        pass
    def get_children(self, **kwargs):
        """Return immediate child elements of this :class:`ClauseElement`.
        This is used for visit traversal.
        \**kwargs may contain flags that change the collection that is
        returned, for example to return a subset of items in order to
        cut down on larger traversals, or to return child items from a
        different context (such as schema-level collections instead of
        clause-level).
        """
        return []
    def self_group(self, against=None):
        # no grouping (parenthesization) by default
        return self
    # TODO: remove .bind as a method from the root ClauseElement.
    # we should only be deriving binds from FromClause elements
    # and certain SchemaItem subclasses.
    # the "search_for_bind" functionality can still be used by
    # execute(), however.
    @property
    def bind(self):
        """Returns the Engine or Connection to which this ClauseElement is
        bound, or None if none found.
        """
        if self._bind is not None:
            return self._bind
        # otherwise search this element's FROM objects for a bind
        for f in _from_objects(self):
            if f is self:
                continue
            engine = f.bind
            if engine is not None:
                return engine
        else:
            return None
    def execute(self, *multiparams, **params):
        """Compile and execute this :class:`ClauseElement`."""
        e = self.bind
        if e is None:
            label = getattr(self, 'description', self.__class__.__name__)
            msg = ('This %s is not bound and does not support direct '
                   'execution. Supply this statement to a Connection or '
                   'Engine for execution. Or, assign a bind to the statement '
                   'or the Metadata of its underlying tables to enable '
                   'implicit execution via this method.' % label)
            raise exc.UnboundExecutionError(msg)
        return e._execute_clauseelement(self, multiparams, params)
    def scalar(self, *multiparams, **params):
        """Compile and execute this :class:`ClauseElement`, returning the result's
        scalar representation.
        """
        return self.execute(*multiparams, **params).scalar()
    def compile(self, bind=None, dialect=None, **kw):
        """Compile this SQL expression.
        The return value is a :class:`~sqlalchemy.engine.Compiled` object.
        Calling ``str()`` or ``unicode()`` on the returned value will yield a
        string representation of the result. The
        :class:`~sqlalchemy.engine.Compiled` object also can return a
        dictionary of bind parameter names and values
        using the ``params`` accessor.
        :param bind: An ``Engine`` or ``Connection`` from which a
        ``Compiled`` will be acquired. This argument takes precedence over
        this :class:`ClauseElement`'s bound engine, if any.
        :param column_keys: Used for INSERT and UPDATE statements, a list of
        column names which should be present in the VALUES clause of the
        compiled statement. If ``None``, all columns from the target table
        object are rendered.
        :param dialect: A ``Dialect`` instance from which a ``Compiled``
        will be acquired. This argument takes precedence over the `bind`
        argument as well as this :class:`ClauseElement`'s bound engine, if any.
        :param inline: Used for INSERT statements, for a dialect which does
        not support inline retrieval of newly generated primary key
        columns, will force the expression used to create the new primary
        key value to be rendered inline within the INSERT statement's
        VALUES clause. This typically refers to Sequence execution but may
        also refer to any server-side default generation function
        associated with a primary key `Column`.
        """
        # dialect resolution precedence: explicit dialect argument, then
        # the bind's dialect, then this element's own bind, finally the
        # lazily-imported DefaultDialect
        if not dialect:
            if bind:
                dialect = bind.dialect
            elif self.bind:
                dialect = self.bind.dialect
                bind = self.bind
            else:
                global DefaultDialect
                if DefaultDialect is None:
                    from sqlalchemy.engine.default import DefaultDialect
                dialect = DefaultDialect()
        compiler = self._compiler(dialect, bind=bind, **kw)
        compiler.compile()
        return compiler
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a Dialect."""
        return dialect.statement_compiler(dialect, self, **kw)
    def __str__(self):
        # Py3K
        #return unicode(self.compile())
        # Py2K
        return unicode(self.compile()).encode('ascii', 'backslashreplace')
        # end Py2K
    def __and__(self, other):
        return and_(self, other)
    def __or__(self, other):
        return or_(self, other)
    def __invert__(self):
        return self._negate()
    def __nonzero__(self):
        raise TypeError("Boolean value of this clause is not defined")
    def _negate(self):
        # prefer an explicitly provided negation clause, else wrap in NOT
        if hasattr(self, 'negation_clause'):
            return self.negation_clause
        else:
            return _UnaryExpression(
                        self.self_group(against=operators.inv),
                        operator=operators.inv,
                        negate=None)
    def __repr__(self):
        friendly = getattr(self, 'description', None)
        if friendly is None:
            return object.__repr__(self)
        else:
            return '<%s.%s at 0x%x; %s>' % (
                self.__module__, self.__class__.__name__, id(self), friendly)
class _Immutable(object):
"""mark a ClauseElement as 'immutable' when expressions are cloned."""
def unique_params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def _clone(self):
return self
class Operators(object):
    """Base protocol for operator-capable objects.
    Subclasses implement ``operate()`` / ``reverse_operate()``; the dunder
    methods here simply dispatch to those hooks.
    """
    def __and__(self, other):
        return self.operate(operators.and_, other)
    def __or__(self, other):
        return self.operate(operators.or_, other)
    def __invert__(self):
        return self.operate(operators.inv)
    def op(self, opstring):
        """Return a callable producing a custom-operator expression."""
        def _op(other):
            return self.operate(operators.op, opstring, other)
        return _op
    def operate(self, op, *other, **kwargs):
        # subclasses supply the actual expression construction
        raise NotImplementedError(str(op))
    def reverse_operate(self, op, other, **kwargs):
        # subclasses supply the actual expression construction
        raise NotImplementedError(str(op))
class ColumnOperators(Operators):
    """Defines comparison and math operations."""
    timetuple = None
    """Hack, allows datetime objects to be compared on the LHS."""
    # -- comparison operators: each delegates to operate() with the
    #    corresponding function from the 'operators' module
    def __lt__(self, other):
        return self.operate(operators.lt, other)
    def __le__(self, other):
        return self.operate(operators.le, other)
    # __eq__ is redefined below, so restore the inherited hash explicitly
    __hash__ = Operators.__hash__
    def __eq__(self, other):
        return self.operate(operators.eq, other)
    def __ne__(self, other):
        return self.operate(operators.ne, other)
    def __gt__(self, other):
        return self.operate(operators.gt, other)
    def __ge__(self, other):
        return self.operate(operators.ge, other)
    def __neg__(self):
        return self.operate(operators.neg)
    # -- string/containment operators
    def concat(self, other):
        return self.operate(operators.concat_op, other)
    def like(self, other, escape=None):
        return self.operate(operators.like_op, other, escape=escape)
    def ilike(self, other, escape=None):
        return self.operate(operators.ilike_op, other, escape=escape)
    def in_(self, other):
        return self.operate(operators.in_op, other)
    def startswith(self, other, **kwargs):
        return self.operate(operators.startswith_op, other, **kwargs)
    def endswith(self, other, **kwargs):
        return self.operate(operators.endswith_op, other, **kwargs)
    def contains(self, other, **kwargs):
        return self.operate(operators.contains_op, other, **kwargs)
    def match(self, other, **kwargs):
        return self.operate(operators.match_op, other, **kwargs)
    # -- ordering / collation modifiers
    def desc(self):
        return self.operate(operators.desc_op)
    def asc(self):
        return self.operate(operators.asc_op)
    def collate(self, collation):
        return self.operate(operators.collate, collation)
    # -- reversed arithmetic (literal on the left-hand side)
    def __radd__(self, other):
        return self.reverse_operate(operators.add, other)
    def __rsub__(self, other):
        return self.reverse_operate(operators.sub, other)
    def __rmul__(self, other):
        return self.reverse_operate(operators.mul, other)
    def __rdiv__(self, other):
        return self.reverse_operate(operators.div, other)
    def between(self, cleft, cright):
        return self.operate(operators.between_op, cleft, cright)
    def distinct(self):
        return self.operate(operators.distinct_op)
    # -- arithmetic operators
    def __add__(self, other):
        return self.operate(operators.add, other)
    def __sub__(self, other):
        return self.operate(operators.sub, other)
    def __mul__(self, other):
        return self.operate(operators.mul, other)
    def __div__(self, other):
        return self.operate(operators.div, other)
    def __mod__(self, other):
        return self.operate(operators.mod, other)
    def __truediv__(self, other):
        return self.operate(operators.truediv, other)
    def __rtruediv__(self, other):
        return self.reverse_operate(operators.truediv, other)
class _CompareMixin(ColumnOperators):
    """Defines comparison and math operations for :class:`ClauseElement` instances."""
    def __compare(self, op, obj, negate=None, reverse=False, **kwargs):
        # comparisons against None/NULL are translated to IS / IS NOT
        if obj is None or isinstance(obj, _Null):
            if op == operators.eq:
                return _BinaryExpression(self, null(), operators.is_, negate=operators.isnot)
            elif op == operators.ne:
                return _BinaryExpression(self, null(), operators.isnot, negate=operators.is_)
            else:
                raise exc.ArgumentError("Only '='/'!=' operators can be used with NULL")
        else:
            obj = self._check_literal(obj)
            # 'reverse' swaps the operands (literal on the left-hand side)
            if reverse:
                return _BinaryExpression(obj,
                                self,
                                op,
                                type_=sqltypes.BOOLEANTYPE,
                                negate=negate, modifiers=kwargs)
            else:
                return _BinaryExpression(self,
                                obj,
                                op,
                                type_=sqltypes.BOOLEANTYPE,
                                negate=negate, modifiers=kwargs)
    def __operate(self, op, obj, reverse=False):
        obj = self._check_literal(obj)
        if reverse:
            left, right = obj, self
        else:
            left, right = self, obj
        # delegate to the operand types to determine the resulting operator
        # and expression type
        if left.type is None:
            op, result_type = sqltypes.NULLTYPE._adapt_expression(op, right.type)
        elif right.type is None:
            op, result_type = left.type._adapt_expression(op, sqltypes.NULLTYPE)
        else:
            op, result_type = left.type._adapt_expression(op, right.type)
        return _BinaryExpression(left, right, op, type_=result_type)
    # a mapping of operators with the method they use, along with their negated
    # operator for comparison operators
    operators = {
        operators.add : (__operate,),
        operators.mul : (__operate,),
        operators.sub : (__operate,),
        # Py2K
        operators.div : (__operate,),
        # end Py2K
        operators.mod : (__operate,),
        operators.truediv : (__operate,),
        operators.lt : (__compare, operators.ge),
        operators.le : (__compare, operators.gt),
        operators.ne : (__compare, operators.eq),
        operators.gt : (__compare, operators.le),
        operators.ge : (__compare, operators.lt),
        operators.eq : (__compare, operators.ne),
        operators.like_op : (__compare, operators.notlike_op),
        operators.ilike_op : (__compare, operators.notilike_op),
    }
    def operate(self, op, *other, **kwargs):
        # dispatch through the class-level operators table above
        o = _CompareMixin.operators[op]
        return o[0](self, op, other[0], *o[1:], **kwargs)
    def reverse_operate(self, op, other, **kwargs):
        o = _CompareMixin.operators[op]
        return o[0](self, op, other, reverse=True, *o[1:], **kwargs)
    def in_(self, other):
        return self._in_impl(operators.in_op, operators.notin_op, other)
    def _in_impl(self, op, negate_op, seq_or_selectable):
        # the right-hand side may be a scalar select, a full selectable,
        # or a plain Python sequence of values
        seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
        if isinstance(seq_or_selectable, _ScalarSelect):
            return self.__compare( op, seq_or_selectable, negate=negate_op)
        elif isinstance(seq_or_selectable, _SelectBaseMixin):
            # TODO: if we ever want to support (x, y, z) IN (select x, y, z from table),
            # we would need a multi-column version of as_scalar() to produce a multi-
            # column selectable that does not export itself as a FROM clause
            return self.__compare( op, seq_or_selectable.as_scalar(), negate=negate_op)
        elif isinstance(seq_or_selectable, Selectable):
            return self.__compare( op, seq_or_selectable, negate=negate_op)
        # Handle non selectable arguments as sequences
        args = []
        for o in seq_or_selectable:
            if not _is_literal(o):
                if not isinstance( o, _CompareMixin):
                    raise exc.InvalidRequestError(
                        "in() function accepts either a list of non-selectable values, "
                        "or a selectable: %r" % o)
            else:
                o = self._bind_param(o)
            args.append(o)
        if len(args) == 0:
            # Special case handling for empty IN's, behave like comparison
            # against zero row selectable. We use != to build the
            # contradiction as it handles NULL values appropriately, i.e.
            # "not (x IN ())" should not return NULL values for x.
            util.warn("The IN-predicate on \"%s\" was invoked with an empty sequence. "
                      "This results in a contradiction, which nonetheless can be "
                      "expensive to evaluate. Consider alternative strategies for "
                      "improved performance." % self)
            return self != self
        return self.__compare(op, ClauseList(*args).self_group(against=op), negate=negate_op)
    def __neg__(self):
        return _UnaryExpression(self, operator=operators.neg)
    def startswith(self, other, escape=None):
        """Produce the clause ``LIKE '<other>%'``"""
        # use __radd__ to force string concat behavior
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String).__radd__(self._check_literal(other)),
            escape=escape)
    def endswith(self, other, escape=None):
        """Produce the clause ``LIKE '%<other>'``"""
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String) + self._check_literal(other),
            escape=escape)
    def contains(self, other, escape=None):
        """Produce the clause ``LIKE '%<other>%'``"""
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String) +
                self._check_literal(other) +
                literal_column("'%'", type_=sqltypes.String),
            escape=escape)
    def match(self, other):
        """Produce a MATCH clause, i.e. ``MATCH '<other>'``
        The allowed contents of ``other`` are database backend specific.
        """
        return self.__compare(operators.match_op, self._check_literal(other))
    def label(self, name):
        """Produce a column label, i.e. ``<columnname> AS <name>``.
        if 'name' is None, an anonymous label name will be generated.
        """
        return _Label(name, self, self.type)
    def desc(self):
        """Produce a DESC clause, i.e. ``<columnname> DESC``"""
        return desc(self)
    def asc(self):
        """Produce a ASC clause, i.e. ``<columnname> ASC``"""
        return asc(self)
    def distinct(self):
        """Produce a DISTINCT clause, i.e. ``DISTINCT <columnname>``"""
        return _UnaryExpression(self, operator=operators.distinct_op, type_=self.type)
    def between(self, cleft, cright):
        """Produce a BETWEEN clause, i.e. ``<column> BETWEEN <cleft> AND <cright>``"""
        return _BinaryExpression(
                self,
                ClauseList(
                    self._check_literal(cleft),
                    self._check_literal(cright),
                    operator=operators.and_,
                    group=False),
                operators.between_op)
    def collate(self, collation):
        """Produce a COLLATE clause, i.e. ``<column> COLLATE utf8_bin``"""
        return collate(self, collation)
    def op(self, operator):
        """produce a generic operator function.
        e.g.::
        somecolumn.op("*")(5)
        produces::
        somecolumn * 5
        :param operator: a string which will be output as the infix operator between
        this :class:`ClauseElement` and the expression passed to the
        generated function.
        This function can also be used to make bitwise operators explicit. For example::
        somecolumn.op('&')(0xff)
        is a bitwise AND of the value in somecolumn.
        """
        return lambda other: self.__operate(operator, other)
    def _bind_param(self, obj):
        # anonymous, unique bind whose type falls back to this element's type
        return _BindParamClause(None, obj, _fallback_type=self.type, unique=True)
    def _check_literal(self, other):
        # coerce the comparison operand into a clause element
        if isinstance(other, _BindParamClause) and isinstance(other.type, sqltypes.NullType):
            # an untyped bind inherits this element's type
            other.type = self.type
            return other
        elif hasattr(other, '__clause_element__'):
            return other.__clause_element__()
        elif not isinstance(other, ClauseElement):
            return self._bind_param(other)
        elif isinstance(other, (_SelectBaseMixin, Alias)):
            # selectables are coerced to scalar subqueries
            return other.as_scalar()
        else:
            return other
class ColumnElement(ClauseElement, _CompareMixin):
    """Represent an element that is usable within the "column clause" portion of a ``SELECT`` statement.
    This includes columns associated with tables, aliases, and
    subqueries, expressions, function calls, SQL keywords such as
    ``NULL``, literals, etc. :class:`ColumnElement` is the ultimate base
    class for all such elements.
    :class:`ColumnElement` supports the ability to be a *proxy* element,
    which indicates that the :class:`ColumnElement` may be associated with
    a :class:`Selectable` which was derived from another :class:`Selectable`.
    An example of a "derived" :class:`Selectable` is an :class:`Alias` of a
    :class:`~sqlalchemy.schema.Table`.
    A :class:`ColumnElement`, by subclassing the :class:`_CompareMixin` mixin
    class, provides the ability to generate new :class:`ClauseElement`
    objects using Python expressions. See the :class:`_CompareMixin`
    docstring for more details.
    """
    __visit_name__ = 'column'
    primary_key = False
    foreign_keys = []
    quote = None
    _label = None
    @property
    def _select_iterable(self):
        # a single column renders as itself in a SELECT's column list
        return (self, )
    @util.memoized_property
    def base_columns(self):
        # the root (non-proxying) columns from which this element derives
        return util.column_set(c for c in self.proxy_set
                                     if not hasattr(c, 'proxies'))
    @util.memoized_property
    def proxy_set(self):
        # this column plus, transitively, every column it proxies for
        s = util.column_set([self])
        if hasattr(self, 'proxies'):
            for c in self.proxies:
                s.update(c.proxy_set)
        return s
    def shares_lineage(self, othercolumn):
        """Return True if the given :class:`ColumnElement` has a common ancestor to this :class:`ColumnElement`."""
        return bool(self.proxy_set.intersection(othercolumn.proxy_set))
    def _make_proxy(self, selectable, name=None):
        """Create a new :class:`ColumnElement` representing this
        :class:`ColumnElement` as it appears in the select list of a
        descending selectable.
        """
        if name:
            co = ColumnClause(name, selectable, type_=getattr(self, 'type', None))
        else:
            # unnamed expressions get an anonymous label, keyed by their
            # string form in the selectable's column collection
            name = str(self)
            co = ColumnClause(self.anon_label, selectable, type_=getattr(self, 'type', None))
        co.proxies = [self]
        selectable.columns[name] = co
        return co
    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.
        Special arguments understood:
        :param use_proxies: when True, consider two columns that
        share a common base column as equivalent (i.e. shares_lineage())
        :param equivalents: a dictionary of columns as keys mapped to sets
        of columns. If the given "other" column is present in this dictionary,
        if any of the columns in the correponding set() pass the comparison
        test, the result is True. This is used to expand the comparison to
        other columns that may be known to be equivalent to this one via
        foreign key or other criterion.
        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)
        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif oth is self:
                return True
        # BUG FIX: 'return False' previously lived in an else: branch of the
        # if/elif above, so the *first* non-matching candidate aborted the
        # scan.  'to_compare' may hold several equivalents (a set, with
        # arbitrary iteration order), so every candidate must be examined
        # before declaring non-equivalence.
        return False
    @util.memoized_property
    def anon_label(self):
        """provides a constant 'anonymous label' for this ColumnElement.
        This is a label() expression which will be named at compile time.
        The same label() is returned each time anon_label is called so
        that expressions can reference anon_label multiple times, producing
        the same label name at compile time.
        the compiler uses this function automatically at compile time
        for expressions that are known to be 'unnamed' like binary
        expressions and function calls.
        """
        return _generated_label("%%(%d %s)s" % (id(self), getattr(self, 'name', 'anon')))
class ColumnCollection(util.OrderedProperties):
    """An ordered dictionary of :class:`ColumnElement` instances, keyed by
    each column's ``.key``.
    ``__eq__()`` is overridden to produce SQL clauses between sets of
    correlated columns rather than a boolean.
    """
    def __init__(self, *cols):
        super(ColumnCollection, self).__init__()
        self.update((col.key, col) for col in cols)
    def __str__(self):
        return repr([str(col) for col in self])
    def replace(self, column):
        """Add ``column`` to this collection, removing unaliased versions of
        it as well as existing columns with the same key.
        e.g.::
            t = Table('sometable', metadata, Column('col1', Integer))
            t.columns.replace(Column('col1', Integer, key='columnone'))
        removes the original 'col1' from the collection and adds the new
        column under the key 'columnone'.  Used by schema.Column to
        override columns during table reflection.
        """
        if column.name in self and column.key != column.name:
            existing = self[column.name]
            if existing.name == existing.key:
                del self[existing.name]
        util.OrderedProperties.__setitem__(self, column.key, column)
    def add(self, column):
        """Add ``column`` to this collection, keyed by its ``.key`` attribute."""
        self[column.key] = column
    def __setitem__(self, key, value):
        if key in self:
            # this warning is primarily to catch select() statements which
            # have conflicting column names in their exported columns
            # collection
            existing = self[key]
            if not existing.shares_lineage(value):
                util.warn(("Column %r on table %r being replaced by another "
                           "column with the same key. Consider use_labels "
                           "for select() statements.") % (key, getattr(existing, 'table', None)))
        util.OrderedProperties.__setitem__(self, key, value)
    def remove(self, column):
        del self[column.key]
    def extend(self, iter):
        for col in iter:
            self.add(col)
    # unhashable: equality produces a SQL clause, not a boolean
    __hash__ = None
    def __eq__(self, other):
        matches = [c == local
                   for c in other
                   for local in self
                   if c.shares_lineage(local)]
        return and_(*matches)
    def __contains__(self, other):
        if not isinstance(other, basestring):
            raise exc.ArgumentError("__contains__ requires a string argument")
        return util.OrderedProperties.__contains__(self, other)
    def contains_column(self, col):
        # identity-based membership via a column_set; "==" between columns
        # yields a SQL clause (always truthy), so a plain list test would
        # produce false positives
        return col in util.column_set(self)
class ColumnSet(util.ordered_column_set):
    """An ordered set of columns whose ``__eq__`` produces a SQL clause."""
    def contains_column(self, col):
        return col in self
    def extend(self, cols):
        for column in cols:
            self.add(column)
    def __add__(self, other):
        combined = list(self)
        combined.extend(other)
        return combined
    def __eq__(self, other):
        clauses = []
        for c in other:
            clauses.extend(c == local
                           for local in self
                           if c.shares_lineage(local))
        return and_(*clauses)
    def __hash__(self):
        return hash(tuple(self))
class Selectable(ClauseElement):
    """mark a class as being selectable"""
    # visitor dispatch key used by compilers and traversal utilities
    __visit_name__ = 'selectable'
class FromClause(Selectable):
    """Represent an element that can be used within the ``FROM``
    clause of a ``SELECT`` statement.
    """
    __visit_name__ = 'fromclause'
    # True when columns can be qualified by this element's name
    # (tables, aliases)
    named_with_column = False
    # FROM objects which this element should suppress from rendering
    _hide_froms = []
    quote = None
    schema = None
    def count(self, whereclause=None, **params):
        """return a SELECT COUNT generated against this :class:`FromClause`."""
        # count against the first primary key column when one exists,
        # otherwise the first column overall
        if self.primary_key:
            col = list(self.primary_key)[0]
        else:
            col = list(self.columns)[0]
        return select(
                    [func.count(col).label('tbl_row_count')],
                    whereclause,
                    from_obj=[self],
                    **params)
    def select(self, whereclause=None, **params):
        """return a SELECT of this :class:`FromClause`."""
        return select([self], whereclause, **params)
    def join(self, right, onclause=None, isouter=False):
        """return a join of this :class:`FromClause` against another :class:`FromClause`."""
        return Join(self, right, onclause, isouter)
    def outerjoin(self, right, onclause=None):
        """return an outer join of this :class:`FromClause` against another :class:`FromClause`."""
        return Join(self, right, onclause, True)
    def alias(self, name=None):
        """return an alias of this :class:`FromClause`.
        For table objects, this has the effect of the table being rendered
        as ``tablename AS aliasname`` in a SELECT statement.
        For select objects, the effect is that of creating a named
        subquery, i.e. ``(select ...) AS aliasname``.
        The :func:`alias()` method is the general way to create
        a "subquery" out of an existing SELECT.
        The ``name`` parameter is optional, and if left blank an
        "anonymous" name will be generated at compile time, guaranteed
        to be unique against other anonymous constructs used in the
        same statement.
        """
        return Alias(self, name)
    def is_derived_from(self, fromclause):
        """Return True if this FromClause is 'derived' from the given FromClause.
        An example would be an Alias of a Table is derived from that Table.
        """
        return fromclause in self._cloned_set
    def replace_selectable(self, old, alias):
        """replace all occurences of FromClause 'old' with the given Alias
        object, returning a copy of this :class:`FromClause`.
        """
        # ClauseAdapter is imported lazily to avoid a circular import with
        # sqlalchemy.sql.util
        global ClauseAdapter
        if ClauseAdapter is None:
            from sqlalchemy.sql.util import ClauseAdapter
        return ClauseAdapter(alias).traverse(self)
    def correspond_on_equivalents(self, column, equivalents):
        """Return corresponding_column for the given column, or if None
        search for a match in the given dictionary.
        """
        col = self.corresponding_column(column, require_embedded=True)
        # NOTE(review): when col is None this tests None as a key of
        # 'equivalents'; possibly 'column in equivalents' was intended --
        # behavior left unchanged pending confirmation.
        if col is None and col in equivalents:
            for equiv in equivalents[col]:
                nc = self.corresponding_column(equiv, require_embedded=True)
                if nc:
                    return nc
        return col
    def corresponding_column(self, column, require_embedded=False):
        """Given a :class:`ColumnElement`, return the exported :class:`ColumnElement`
        object from this :class:`Selectable` which corresponds to that
        original :class:`~sqlalchemy.schema.Column` via a common ancestor column.
        :param column: the target :class:`ColumnElement` to be matched
        :param require_embedded: only return corresponding columns for the given
        :class:`ColumnElement`, if the given :class:`ColumnElement` is
        actually present within a sub-element of this
        :class:`FromClause`. Normally the column will match if it merely
        shares a common ancestor with one of the exported columns
        of this :class:`FromClause`.
        """
        # dont dig around if the column is locally present
        if self.c.contains_column(column):
            return column
        col, intersect = None, None
        target_set = column.proxy_set
        cols = self.c
        for c in cols:
            i = target_set.intersection(itertools.chain(*[p._cloned_set for p in c.proxy_set]))
            if i and \
                (not require_embedded or c.proxy_set.issuperset(target_set)):
                if col is None:
                    # no corresponding column yet, pick this one.
                    col, intersect = c, i
                elif len(i) > len(intersect):
                    # 'c' has a larger field of correspondence than 'col'.
                    # i.e. selectable.c.a1_x->a1.c.x->table.c.x matches
                    # a1.c.x->table.c.x better than
                    # selectable.c.x->table.c.x does.
                    col, intersect = c, i
                elif i == intersect:
                    # they have the same field of correspondence.
                    # see which proxy_set has fewer columns in it, which indicates
                    # a closer relationship with the root column. Also take into
                    # account the "weight" attribute which CompoundSelect() uses to
                    # give higher precedence to columns based on vertical position
                    # in the compound statement, and discard columns that have no
                    # reference to the target column (also occurs with
                    # CompoundSelect)
                    col_distance = util.reduce(operator.add,
                                    [sc._annotations.get('weight', 1)
                                     for sc in col.proxy_set
                                     if sc.shares_lineage(column)]
                                    )
                    c_distance = util.reduce(operator.add,
                                    [sc._annotations.get('weight', 1)
                                     for sc in c.proxy_set
                                     if sc.shares_lineage(column)]
                                    )
                    if c_distance < col_distance:
                        col, intersect = c, i
        return col
    @property
    def description(self):
        """a brief description of this FromClause.
        Used primarily for error message formatting.
        """
        return getattr(self, 'name', self.__class__.__name__ + " object")
    def _reset_exported(self):
        """delete memoized collections when a FromClause is cloned."""
        # BUG FIX: a missing comma previously fused '_primary_key' and
        # '_foreign_keys' into the single bogus attribute name
        # '_primary_key_foreign_keys' via implicit string concatenation,
        # so those two memoized collections were never cleared on clone.
        for attr in ('_columns', '_primary_key', '_foreign_keys',
                     'locate_all_froms'):
            self.__dict__.pop(attr, None)
    @util.memoized_property
    def _columns(self):
        """Return the collection of Column objects contained by this FromClause."""
        self._export_columns()
        return self._columns
    @util.memoized_property
    def _primary_key(self):
        """Return the collection of Column objects which comprise the primary key of this FromClause."""
        self._export_columns()
        return self._primary_key
    @util.memoized_property
    def _foreign_keys(self):
        """Return the collection of ForeignKey objects which this FromClause references."""
        self._export_columns()
        return self._foreign_keys
    columns = property(attrgetter('_columns'), doc=_columns.__doc__)
    primary_key = property(
        attrgetter('_primary_key'),
        doc=_primary_key.__doc__)
    foreign_keys = property(
        attrgetter('_foreign_keys'),
        doc=_foreign_keys.__doc__)
    # synonyms for 'columns'
    c = _select_iterable = property(attrgetter('columns'), doc=_columns.__doc__)
    def _export_columns(self):
        """Initialize column collections."""
        self._columns = ColumnCollection()
        self._primary_key = ColumnSet()
        self._foreign_keys = set()
        self._populate_column_collection()
    def _populate_column_collection(self):
        # subclasses (Table, Join, Select, ...) fill in the collections here
        pass
class _BindParamClause(ColumnElement):
    """Represent a bind parameter.
    Public constructor is the :func:`bindparam()` function.
    """
    __visit_name__ = 'bindparam'
    quote = None
    def __init__(self, key, value, type_=None, unique=False,
                            isoutparam=False, required=False,
                            _fallback_type=None):
        """Construct a _BindParamClause.
        key
          the key for this bind param.  Will be used in the generated
          SQL statement for dialects that use named parameters.  This
          value may be modified when part of a compilation operation,
          if other :class:`_BindParamClause` objects exist with the same
          key, or if its length is too long and truncation is
          required.
        value
          Initial value for this bind param.  This value may be
          overridden by the dictionary of parameters sent to statement
          compilation/execution.
        type\_
          A ``TypeEngine`` object that will be used to pre-process the
          value corresponding to this :class:`_BindParamClause` at
          execution time.
        unique
          if True, the key name of this BindParamClause will be
          modified if another :class:`_BindParamClause` of the same name
          already has been located within the containing
          :class:`ClauseElement`.
        required
          a value is required at execution time.
        isoutparam
          if True, the parameter should be treated like a stored procedure "OUT"
          parameter.
        """
        if unique:
            # anonymize the key: embedding id(self) guarantees the key
            # is unique per construct; the compiler generates the final
            # short name from this template.
            self.key = _generated_label("%%(%d %s)s" % (id(self), key or 'param'))
        else:
            self.key = key or _generated_label("%%(%d param)s" % id(self))
        # the original, user-facing key; used to re-key copies on clone
        self._orig_key = key or 'param'
        self.unique = unique
        self.value = value
        self.isoutparam = isoutparam
        self.required = required
        if type_ is None:
            # infer the SQL type from the Python type of 'value'
            self.type = sqltypes.type_map.get(type(value), _fallback_type or sqltypes.NULLTYPE)
            if _fallback_type and _fallback_type._type_affinity == self.type._type_affinity:
                # prefer the fallback when it shares affinity with the
                # inferred type
                self.type = _fallback_type
        elif isinstance(type_, type):
            # a type class was passed; instantiate it
            self.type = type_()
        else:
            self.type = type_
    def _clone(self):
        c = ClauseElement._clone(self)
        if self.unique:
            # a cloned unique parameter gets a fresh anonymous key
            # based on the copy's own id
            c.key = _generated_label("%%(%d %s)s" % (id(c), c._orig_key or 'param'))
        return c
    def _convert_to_unique(self):
        if not self.unique:
            self.unique = True
            self.key = _generated_label("%%(%d %s)s" % (id(self), self._orig_key or 'param'))
    def bind_processor(self, dialect):
        return self.type.dialect_impl(dialect).bind_processor(dialect)
    def compare(self, other, **kw):
        """Compare this :class:`_BindParamClause` to the given clause."""
        # note: keys are not compared -- equality is type affinity plus
        # value only.
        return isinstance(other, _BindParamClause) and \
                self.type._compare_type_affinity(other.type) and \
                self.value == other.value
    def __getstate__(self):
        """execute a deferred value for serialization purposes."""
        d = self.__dict__.copy()
        v = self.value
        if util.callable(v):
            # resolve callable values now, pickling the result instead
            v = v()
        d['value'] = v
        return d
    def __repr__(self):
        return "_BindParamClause(%r, %r, type_=%r)" % (
            self.key, self.value, self.type
            )
class _TypeClause(ClauseElement):
    """Handle a type keyword in a SQL statement.
    Used by the ``Case`` statement.
    """
    __visit_name__ = 'typeclause'
    def __init__(self, type):
        # NOTE: parameter is named 'type' (shadows the builtin) -- it is
        # part of the public signature, so left unchanged.
        self.type = type
class _Generative(object):
"""Allow a ClauseElement to generate itself via the
@_generative decorator.
"""
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class Executable(_Generative):
    """Mark a ClauseElement as supporting execution."""
    supports_execution = True
    # immutable default; execution_options() replaces it wholesale with
    # a new frozendict rather than mutating it in place.
    _execution_options = util.frozendict()
    @_generative
    def execution_options(self, **kw):
        """ Set non-SQL options for the statement which take effect during execution.
        Current options include:
        * autocommit - when True, a COMMIT will be invoked after execution
          when executed in 'autocommit' mode, i.e. when an explicit transaction
          is not begun on the connection.  Note that DBAPI connections by
          default are always in a transaction - SQLAlchemy uses rules applied
          to different kinds of statements to determine if COMMIT will be invoked
          in order to provide its "autocommit" feature.  Typically, all
          INSERT/UPDATE/DELETE statements as well as CREATE/DROP statements
          have autocommit behavior enabled; SELECT constructs do not.  Use this
          option when invoking a SELECT or other specific SQL construct
          where COMMIT is desired (typically when calling stored procedures
          and such).
        * stream_results - indicate to the dialect that results should be
          "streamed" and not pre-buffered, if possible.  This is a limitation
          of many DBAPIs.  The flag is currently understood only by the
          psycopg2 dialect.
        """
        self._execution_options = self._execution_options.union(kw)
# Legacy alias retained for backwards compatibility; some outside users
# may still be calling this under the old underscored name.
_Executable = Executable
class _TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment.
    Public constructor is the :func:`text()` function.
    """
    __visit_name__ = 'textclause'
    # matches ":name" style bind parameters; the lookbehind skips colons
    # preceded by another colon, a word character, or a backslash
    # (\x5c), and the lookahead skips "::" (e.g. PG casts).
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = Executable._execution_options.union({'autocommit':PARSE_AUTOCOMMIT})
    @property
    def _select_iterable(self):
        return (self,)
    _hide_froms = []
    def __init__(self, text = "", bind=None,
                    bindparams=None, typemap=None,
                    autocommit=None):
        self._bind = bind
        self.bindparams = {}
        self.typemap = typemap
        if autocommit is not None:
            util.warn_deprecated("autocommit on text() is deprecated. "
                                    "Use .execution_options(autocommit=True)")
            self._execution_options = self._execution_options.union({'autocommit':autocommit})
        if typemap is not None:
            # resolve all given types to instances up front
            for key in typemap.keys():
                typemap[key] = sqltypes.to_instance(typemap[key])
        def repl(m):
            # register a bindparam() for each ":name" found, leaving the
            # text itself in ":name" form
            self.bindparams[m.group(1)] = bindparam(m.group(1))
            return ":%s" % m.group(1)
        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)
        if bindparams is not None:
            # explicitly passed bindparam() objects replace any
            # same-keyed auto-generated ones
            for b in bindparams:
                self.bindparams[b.key] = b
    @property
    def type(self):
        # NOTE(review): with exactly one typemap entry this returns the
        # dict *key* (a string), not the type instance -- looks
        # suspicious; confirm intended behavior against callers.
        if self.typemap is not None and len(self.typemap) == 1:
            return list(self.typemap)[0]
        else:
            return None
    def _copy_internals(self, clone=_clone):
        self.bindparams = dict((b.key, clone(b))
                               for b in self.bindparams.values())
    def get_children(self, **kwargs):
        return self.bindparams.values()
class _Null(ColumnElement):
    """Represent the NULL keyword in a SQL statement.
    Public constructor is the :func:`null()` function.
    """
    __visit_name__ = 'null'
    def __init__(self):
        # NULL always carries the NULLTYPE singleton.
        self.type = sqltypes.NULLTYPE
class ClauseList(ClauseElement):
    """Describe a list of clauses, separated by an operator.
    By default, is comma-separated, such as a column listing.
    """
    __visit_name__ = 'clauselist'
    def __init__(self, *clauses, **kwargs):
        # operator: rendered between elements (comma by default).
        # group: whether this list may parenthesize itself via
        #   self_group().
        # group_contents: whether each element is asked to parenthesize
        #   itself against this list's operator.
        self.operator = kwargs.pop('operator', operators.comma_op)
        self.group = kwargs.pop('group', True)
        self.group_contents = kwargs.pop('group_contents', True)
        if self.group_contents:
            self.clauses = [
                _literal_as_text(clause).self_group(against=self.operator)
                for clause in clauses if clause is not None]
        else:
            self.clauses = [
                _literal_as_text(clause)
                for clause in clauses if clause is not None]
    @util.memoized_property
    def type(self):
        # borrow the type of the first element; NULLTYPE when empty
        if self.clauses:
            return self.clauses[0].type
        else:
            return sqltypes.NULLTYPE
    def __iter__(self):
        return iter(self.clauses)
    def __len__(self):
        return len(self.clauses)
    @property
    def _select_iterable(self):
        return iter(self)
    def append(self, clause):
        # TODO: not sure if i like the 'group_contents' flag. need to
        # define the difference between a ClauseList of ClauseLists,
        # and a "flattened" ClauseList of ClauseLists. flatten()
        # method ?
        if self.group_contents:
            self.clauses.append(_literal_as_text(clause).self_group(against=self.operator))
        else:
            self.clauses.append(_literal_as_text(clause))
    def _copy_internals(self, clone=_clone):
        self.clauses = [clone(clause) for clause in self.clauses]
    def get_children(self, **kwargs):
        return self.clauses
    @property
    def _from_objects(self):
        return list(itertools.chain(*[c._from_objects for c in self.clauses]))
    def self_group(self, against=None):
        # parenthesize only when this list's operator binds more
        # loosely than the surrounding one
        if self.group and self.operator is not against and \
                operators.is_precedent(self.operator, against):
            return _Grouping(self)
        else:
            return self
    def compare(self, other, **kw):
        """Compare this :class:`ClauseList` to the given :class:`ClauseList`,
        including a comparison of all the clause items.
        """
        # a single-element list compares directly against a non-list
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            return self.clauses[0].compare(other, **kw)
        elif isinstance(other, ClauseList) and len(self.clauses) == len(other.clauses):
            for i in range(0, len(self.clauses)):
                if not self.clauses[i].compare(other.clauses[i], **kw):
                    return False
            else:
                # all elements matched; equal if operators also match
                return self.operator == other.operator
        else:
            return False
class BooleanClauseList(ClauseList, ColumnElement):
    """A :class:`ClauseList` usable as a boolean-typed column expression."""
    __visit_name__ = 'clauselist'
    def __init__(self, *clauses, **kwargs):
        # Resolve the result type first (Boolean unless 'type_' is
        # given), then delegate list construction to ClauseList, which
        # pops its own keyword arguments.
        resolved_type = sqltypes.to_instance(kwargs.get('type_', sqltypes.Boolean))
        super(BooleanClauseList, self).__init__(*clauses, **kwargs)
        self.type = resolved_type
    @property
    def _select_iterable(self):
        # renders as a single selectable column expression
        return (self, )
class _Tuple(ClauseList, ColumnElement):
    """A parenthesized, comma-separated tuple of column expressions."""
    def __init__(self, *clauses, **kw):
        # coerce raw values into bind parameters before building the
        # underlying clause list
        coerced = [_literal_as_binds(c) for c in clauses]
        super(_Tuple, self).__init__(*coerced, **kw)
        self.type = _type_from_args(coerced)
    @property
    def _select_iterable(self):
        return (self, )
    def _bind_param(self, obj):
        # expand a composite value into a tuple of anonymous, unique
        # bind parameters typed like this tuple
        params = [
            _BindParamClause(None, o, _fallback_type=self.type, unique=True)
            for o in obj
        ]
        return _Tuple(*params).self_group()
class _Case(ColumnElement):
    """Represent a SQL ``CASE`` expression."""
    __visit_name__ = 'case'
    def __init__(self, whens, value=None, else_=None):
        # accept a dict-like 'whens' as well as a sequence of 2-tuples
        try:
            whens = util.dictlike_iteritems(whens)
        except TypeError:
            pass
        if value is not None:
            # "simple" CASE: conditions compare against 'value', so
            # plain literals may be coerced to bind parameters
            whenlist = [
                (_literal_as_binds(c).self_group(), _literal_as_binds(r)) for (c, r) in whens
            ]
        else:
            # "searched" CASE: conditions must be SQL expressions, not
            # bare literals
            whenlist = [
                (_no_literals(c).self_group(), _literal_as_binds(r)) for (c, r) in whens
            ]
        if whenlist:
            # take the result type from the last WHEN pair's result
            # expression
            type_ = list(whenlist[-1])[-1].type
        else:
            type_ = None
        if value is None:
            self.value = None
        else:
            self.value = _literal_as_binds(value)
        self.type = type_
        self.whens = whenlist
        if else_ is not None:
            self.else_ = _literal_as_binds(else_)
        else:
            self.else_ = None
    def _copy_internals(self, clone=_clone):
        if self.value is not None:
            self.value = clone(self.value)
        self.whens = [(clone(x), clone(y)) for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_)
    def get_children(self, **kwargs):
        # yields: value (if any), each (condition, result) pair
        # flattened, then the ELSE (if any)
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_
    @property
    def _from_objects(self):
        return list(itertools.chain(*[x._from_objects for x in self.get_children()]))
class FunctionElement(Executable, ColumnElement, FromClause):
    """Base for SQL function-oriented constructs."""
    def __init__(self, *clauses, **kwargs):
        # self.name must already be set by the subclass (Function sets
        # it before calling this constructor); it keys anonymous bind
        # parameters created for literal arguments.
        args = [_literal_as_binds(c, self.name) for c in clauses]
        self.clause_expr = ClauseList(
                                operator=operators.comma_op,
                                 group_contents=True, *args).\
                                 self_group()
    @property
    def columns(self):
        # a function acts as a one-column FROM clause: itself
        return [self]
    @util.memoized_property
    def clauses(self):
        # the ClauseList inside the grouping produced by self_group()
        return self.clause_expr.element
    @property
    def _from_objects(self):
        return self.clauses._from_objects
    def get_children(self, **kwargs):
        return self.clause_expr,
    def _copy_internals(self, clone=_clone):
        self.clause_expr = clone(self.clause_expr)
        self._reset_exported()
        # invalidate the memoized 'clauses' so it re-derives from the
        # newly cloned clause_expr
        util.reset_memoized(self, 'clauses')
    def select(self):
        # wrap this function in a SELECT, propagating any execution
        # options already set on the function
        s = select([self])
        if self._execution_options:
            s = s.execution_options(**self._execution_options)
        return s
    def scalar(self):
        # execute and return the first column of the first row
        return self.select().execute().scalar()
    def execute(self):
        # execute a SELECT of this function
        return self.select().execute()
    def _bind_param(self, obj):
        return _BindParamClause(None, obj, _fallback_type=self.type, unique=True)
class Function(FunctionElement):
    """Describe a named SQL function."""
    __visit_name__ = 'function'
    def __init__(self, name, *clauses, **kw):
        # optional package name components, presumably rendered as a
        # dotted prefix before the function name -- confirm in compiler
        self.packagenames = kw.pop('packagenames', None) or []
        # must be set before FunctionElement.__init__, which reads
        # self.name
        self.name = name
        self._bind = kw.get('bind', None)
        self.type = sqltypes.to_instance(kw.get('type_', None))
        FunctionElement.__init__(self, *clauses, **kw)
    def _bind_param(self, obj):
        # anonymous unique bind parameter keyed on the function name
        return _BindParamClause(self.name, obj, _fallback_type=self.type, unique=True)
class _Cast(ColumnElement):
    """Represent a ``CAST`` expression."""
    __visit_name__ = 'cast'
    def __init__(self, clause, totype, **kwargs):
        # the target type is resolved to an instance and additionally
        # wrapped in a _TypeClause so the compiler can render it
        self.type = sqltypes.to_instance(totype)
        self.clause = _literal_as_binds(clause, None)
        self.typeclause = _TypeClause(self.type)
    def _copy_internals(self, clone=_clone):
        self.clause = clone(self.clause)
        self.typeclause = clone(self.typeclause)
    def get_children(self, **kwargs):
        return self.clause, self.typeclause
    @property
    def _from_objects(self):
        return self.clause._from_objects
class _Extract(ColumnElement):
    """Represent an ``EXTRACT(field FROM expr)`` expression."""
    __visit_name__ = 'extract'
    def __init__(self, field, expr, **kwargs):
        # EXTRACT always yields an integer result type here
        self.type = sqltypes.Integer()
        self.field = field
        self.expr = _literal_as_binds(expr, None)
    def _copy_internals(self, clone=_clone):
        self.expr = clone(self.expr)
    def get_children(self, **kwargs):
        return self.expr,
    @property
    def _from_objects(self):
        return self.expr._from_objects
class _UnaryExpression(ColumnElement):
    """Represent an expression with a single operand and an operator or modifier."""
    __visit_name__ = 'unary'
    def __init__(self, element, operator=None, modifier=None, type_=None, negate=None):
        # 'operator' and 'modifier' are rendered around the element by
        # the compiler (presumably operator before, modifier after --
        # confirm against the compiler's visit_unary)
        self.operator = operator
        self.modifier = modifier
        self.element = _literal_as_text(element).self_group(against=self.operator or self.modifier)
        self.type = sqltypes.to_instance(type_)
        # 'negate' is the operator to use when this expression is
        # negated via _negate()
        self.negate = negate
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _copy_internals(self, clone=_clone):
        self.element = clone(self.element)
    def get_children(self, **kwargs):
        return self.element,
    def compare(self, other, **kw):
        """Compare this :class:`_UnaryExpression` against the given :class:`ClauseElement`."""
        return (
            isinstance(other, _UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )
    def _negate(self):
        if self.negate is not None:
            # swap operator and negate so double negation round-trips
            return _UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type)
        else:
            return super(_UnaryExpression, self)._negate()
    def self_group(self, against=None):
        if self.operator and operators.is_precedent(self.operator, against):
            return _Grouping(self)
        else:
            return self
class _BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``."""
    __visit_name__ = 'binary'
    def __init__(self, left, right, operator, type_=None, negate=None, modifiers=None):
        # each side parenthesizes itself against the operator as needed
        self.left = _literal_as_text(left).self_group(against=operator)
        self.right = _literal_as_text(right).self_group(against=operator)
        self.operator = operator
        self.type = sqltypes.to_instance(type_)
        # 'negate' is the operator used to render the negation of this
        # expression, if any
        self.negate = negate
        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers
    def __nonzero__(self):
        # apply the comparison operator to the operand hashes to give a
        # Python truth value for expressions like (col == col)
        # Bug fix: was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to Exception so only
        # ordinary errors are converted to TypeError.
        try:
            return self.operator(hash(self.left), hash(self.right))
        except Exception:
            raise TypeError("Boolean value of this clause is not defined")
    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects
    def _copy_internals(self, clone=_clone):
        self.left = clone(self.left)
        self.right = clone(self.right)
    def get_children(self, **kwargs):
        return self.left, self.right
    def compare(self, other, **kw):
        """Compare this :class:`_BinaryExpression` against the given :class:`_BinaryExpression`."""
        # commutative operators also match with the operands swapped
        return (
            isinstance(other, _BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )
    def self_group(self, against=None):
        # use small/large defaults for comparison so that unknown
        # operators are always parenthesized
        if self.operator is not against and operators.is_precedent(self.operator, against):
            return _Grouping(self)
        else:
            return self
    def _negate(self):
        if self.negate is not None:
            # negation swaps operator/negate and forces a boolean type
            return _BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=sqltypes.BOOLEANTYPE,
                modifiers=self.modifiers)
        else:
            return super(_BinaryExpression, self)._negate()
class _Exists(_UnaryExpression):
    """Represent an ``EXISTS`` clause."""
    __visit_name__ = _UnaryExpression.__visit_name__
    _from_objects = []
    def __init__(self, *args, **kwargs):
        if args and isinstance(args[0], (_SelectBaseMixin, _ScalarSelect)):
            # an already-built select was passed; use it directly
            s = args[0]
        else:
            # otherwise build a scalar "SELECT *" (or the given
            # columns) from the arguments
            if not args:
                args = ([literal_column('*')],)
            s = select(*args, **kwargs).as_scalar().self_group()
        _UnaryExpression.__init__(self, s, operator=operators.exists, type_=sqltypes.Boolean)
    def select(self, whereclause=None, **params):
        return select([self], whereclause, **params)
    def correlate(self, fromclause):
        # generate a copy whose inner select correlates the given
        # FROM clause
        e = self._clone()
        e.element = self.element.correlate(fromclause).self_group()
        return e
    def select_from(self, clause):
        """return a new exists() construct with the given expression set as its FROM
        clause.
        """
        e = self._clone()
        e.element = self.element.select_from(clause).self_group()
        return e
    def where(self, clause):
        """return a new exists() construct with the given expression added to its WHERE
        clause, joined to the existing clause via AND, if any.
        """
        e = self._clone()
        e.element = self.element.where(clause).self_group()
        return e
class Join(FromClause):
    """represent a ``JOIN`` construct between two :class:`FromClause` elements.
    The public constructor function for :class:`Join` is the module-level
    :func:`join()` function, as well as the :func:`join()` method available
    off all :class:`FromClause` subclasses.
    """
    __visit_name__ = 'join'
    def __init__(self, left, right, onclause=None, isouter=False):
        self.left = _literal_as_text(left)
        # the right side groups itself so nested joins parenthesize
        self.right = _literal_as_text(right).self_group()
        if onclause is None:
            # derive the ON criterion from the two sides' relationship
            self.onclause = self._match_primaries(self.left, self.right)
        else:
            self.onclause = onclause
        self.isouter = isouter
        # cache for the deprecated fold_equivalents feature
        self.__folded_equivalents = None
    @property
    def description(self):
        return "Join object on %s(%d) and %s(%d)" % (
            self.left.description,
            id(self.left),
            self.right.description,
            id(self.right))
    def is_derived_from(self, fromclause):
        return fromclause is self or \
                self.left.is_derived_from(fromclause) or\
                self.right.is_derived_from(fromclause)
    def self_group(self, against=None):
        return _FromGrouping(self)
    def _populate_column_collection(self):
        columns = [c for c in self.left.columns] + [c for c in self.right.columns]
        # sql_util is imported lazily on first use, presumably to avoid
        # a circular import at module load time -- same pattern as
        # _match_primaries below
        global sql_util
        if not sql_util:
            from sqlalchemy.sql import util as sql_util
        # primary key is the reduced set of PK columns across both
        # sides, taking the ON clause into account
        self._primary_key.extend(sql_util.reduce_columns(
                (c for c in columns if c.primary_key), self.onclause))
        self._columns.update((col._label, col) for col in columns)
        self._foreign_keys.update(itertools.chain(*[col.foreign_keys for col in columns]))
    def _copy_internals(self, clone=_clone):
        self._reset_exported()
        self.left = clone(self.left)
        self.right = clone(self.right)
        self.onclause = clone(self.onclause)
        self.__folded_equivalents = None
    def get_children(self, **kwargs):
        return self.left, self.right, self.onclause
    def _match_primaries(self, primary, secondary):
        global sql_util
        if not sql_util:
            from sqlalchemy.sql import util as sql_util
        return sql_util.join_condition(primary, secondary)
    def select(self, whereclause=None, fold_equivalents=False, **kwargs):
        """Create a :class:`Select` from this :class:`Join`.
        :param whereclause: the WHERE criterion that will be sent to
          the :func:`select()` function
        :param fold_equivalents: based on the join criterion of this
          :class:`Join`, do not include
          repeat column names in the column list of the resulting
          select, for columns that are calculated to be "equivalent"
          based on the join criterion of this :class:`Join`. This will
          recursively apply to any joins directly nested by this one
          as well.  This flag is specific to a particular use case
          by the ORM and is deprecated as of 0.6.
        :param \**kwargs: all other kwargs are sent to the
          underlying :func:`select()` function.
        """
        if fold_equivalents:
            global sql_util
            if not sql_util:
                from sqlalchemy.sql import util as sql_util
            util.warn_deprecated("fold_equivalents is deprecated.")
            collist = sql_util.folded_equivalents(self)
        else:
            collist = [self.left, self.right]
        return select(collist, whereclause, from_obj=[self], **kwargs)
    @property
    def bind(self):
        # prefer the left side's bind; fall back to the right's
        return self.left.bind or self.right.bind
    def alias(self, name=None):
        """Create a :class:`Select` out of this :class:`Join` clause and return an :class:`Alias` of it.
        The :class:`Select` is not correlating.
        """
        return self.select(use_labels=True, correlate=False).alias(name)
    @property
    def _hide_froms(self):
        return itertools.chain(*[_from_objects(x.left, x.right) for x in self._cloned_set])
    @property
    def _from_objects(self):
        return [self] + \
                self.onclause._from_objects + \
                self.left._from_objects + \
                self.right._from_objects
class Alias(FromClause):
    """Represents a table or selectable alias (AS).
    Represents an alias, as typically applied to any table or
    sub-select within a SQL statement using the ``AS`` keyword (or
    without the keyword on certain databases such as Oracle).
    This object is constructed from the :func:`alias()` module level
    function as well as the :func:`alias()` method available on all
    :class:`FromClause` subclasses.
    """
    __visit_name__ = 'alias'
    named_with_column = True
    def __init__(self, selectable, alias=None):
        # unwrap nested aliases down to the underlying selectable
        baseselectable = selectable
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
        self.supports_execution = baseselectable.supports_execution
        if self.supports_execution:
            self._execution_options = baseselectable._execution_options
        self.element = selectable
        if alias is None:
            # derive an anonymous name, seeded with the original's
            # name when it has one
            if self.original.named_with_column:
                alias = getattr(self.original, 'name', None)
            alias = _generated_label('%%(%d %s)s' % (id(self), alias or 'anon'))
        self.name = alias
    @property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    def as_scalar(self):
        try:
            return self.element.as_scalar()
        except AttributeError:
            raise AttributeError("Element %s does not support 'as_scalar()'" % self.element)
    def is_derived_from(self, fromclause):
        if fromclause in self._cloned_set:
            return True
        return self.element.is_derived_from(fromclause)
    def _populate_column_collection(self):
        for col in self.element.columns:
            col._make_proxy(self)
    def _copy_internals(self, clone=_clone):
        self._reset_exported()
        # Bug fix: previously called the module-level _clone()
        # unconditionally, ignoring a caller-supplied 'clone' callable
        # -- inconsistent with every other _copy_internals in this
        # module.
        self.element = clone(self.element)
        baseselectable = self.element
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
    def get_children(self, column_collections=True, aliased_selectables=True, **kwargs):
        if column_collections:
            for c in self.c:
                yield c
        if aliased_selectables:
            yield self.element
    @property
    def _from_objects(self):
        return [self]
    @property
    def bind(self):
        return self.element.bind
class _Grouping(ColumnElement):
    """Represent a grouping within a column expression"""
    __visit_name__ = 'grouping'
    def __init__(self, element):
        self.element = element
        self.type = getattr(element, 'type', None)
    @property
    def _label(self):
        # prefer the wrapped element's label; otherwise anonymous
        inner_label = getattr(self.element, '_label', None)
        return inner_label or self.anon_label
    def _copy_internals(self, clone=_clone):
        self.element = clone(self.element)
    def get_children(self, **kwargs):
        return (self.element,)
    @property
    def _from_objects(self):
        return self.element._from_objects
    def __getattr__(self, attr):
        # any attribute not found here is delegated to the wrapped
        # element
        return getattr(self.element, attr)
    def __getstate__(self):
        # pickle explicitly; __getattr__ delegation would otherwise
        # interfere with default state lookup
        return {'element': self.element, 'type': self.type}
    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']
class _FromGrouping(FromClause):
    """Represent a grouping of a FROM clause"""
    __visit_name__ = 'grouping'
    def __init__(self, element):
        self.element = element
    @property
    def columns(self):
        # expose the wrapped selectable's columns directly
        return self.element.columns
    @property
    def _hide_froms(self):
        return self.element._hide_froms
    def get_children(self, **kwargs):
        return (self.element,)
    def _copy_internals(self, clone=_clone):
        self.element = clone(self.element)
    @property
    def _from_objects(self):
        return self.element._from_objects
    def __getattr__(self, attr):
        # delegate any unknown attribute to the wrapped element
        return getattr(self.element, attr)
    def __getstate__(self):
        # pickle explicitly; __getattr__ delegation would otherwise
        # interfere with default state lookup
        return {'element': self.element}
    def __setstate__(self, state):
        self.element = state['element']
class _Label(ColumnElement):
    """Represents a column label (AS).
    Represent a label, as typically applied to any column-level
    element using the ``AS`` sql keyword.
    This object is constructed from the :func:`label()` module level
    function as well as the :func:`label()` method available on all
    :class:`ColumnElement` subclasses.
    """
    __visit_name__ = 'label'
    def __init__(self, name, element, type_=None):
        # unwrap nested labels; the innermost element is labeled
        # directly
        while isinstance(element, _Label):
            element = element.element
        self.name = self.key = self._label = name or \
                        _generated_label("%%(%d %s)s" % (
                            id(self), getattr(element, 'name', 'anon'))
                        )
        self._element = element
        self._type = type_
        self.quote = element.quote
    @util.memoized_property
    def type(self):
        # explicit type_ wins; otherwise the wrapped element's type
        return sqltypes.to_instance(
                    self._type or getattr(self._element, 'type', None)
                )
    @util.memoized_property
    def element(self):
        # group the wrapped element against the AS operator
        return self._element.self_group(against=operators.as_)
    def _proxy_attr(name):
        # class-body helper: builds a read-only property that delegates
        # attribute 'name' to self.element
        get = attrgetter(name)
        def attr(self):
            return get(self.element)
        return property(attr)
    proxies = _proxy_attr('proxies')
    base_columns = _proxy_attr('base_columns')
    proxy_set = _proxy_attr('proxy_set')
    primary_key = _proxy_attr('primary_key')
    foreign_keys = _proxy_attr('foreign_keys')
    def get_children(self, **kwargs):
        return self.element,
    def _copy_internals(self, clone=_clone):
        self.element = clone(self.element)
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _make_proxy(self, selectable, name = None):
        # proxy the underlying element under this label's name when
        # possible; fall back to a plain column for raw elements
        if isinstance(self.element, (Selectable, ColumnElement)):
            e = self.element._make_proxy(selectable, name=self.name)
        else:
            e = column(self.name)._make_proxy(selectable=selectable)
        e.proxies.append(self)
        return e
class ColumnClause(_Immutable, ColumnElement):
    """Represents a generic column expression from any textual string.
    This includes columns associated with tables, aliases and select
    statements, but also any arbitrary text.  May or may not be bound
    to an underlying :class:`Selectable`. :class:`ColumnClause` is usually
    created publically via the :func:`column()` function or the
    :func:`literal_column()` function.
    text
      the text of the element.
    selectable
      parent selectable.
    type
      ``TypeEngine`` object which can associate this :class:`ColumnClause`
      with a type.
    is_literal
      if True, the :class:`ColumnClause` is assumed to be an exact
      expression that will be delivered to the output with no quoting
      rules applied regardless of case sensitive settings. the
      :func:`literal_column()` function is usually used to create such a
      :class:`ColumnClause`.
    """
    __visit_name__ = 'column'
    onupdate = default = server_default = server_onupdate = None
    def __init__(self, text, selectable=None, type_=None, is_literal=False):
        self.key = self.name = text
        self.table = selectable
        self.type = sqltypes.to_instance(type_)
        self.is_literal = is_literal
    @util.memoized_property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    @util.memoized_property
    def _label(self):
        # generate the "<table>_<column>" label used by use_labels;
        # literal columns are never relabeled
        if self.is_literal:
            return None
        elif self.table is not None and self.table.named_with_column:
            if getattr(self.table, 'schema', None):
                # include the schema, with dots flattened to
                # underscores
                label = self.table.schema.replace('.', '_') + "_" + \
                            _escape_for_generated(self.table.name) + "_" + \
                            _escape_for_generated(self.name)
            else:
                label = _escape_for_generated(self.table.name) + "_" + \
                            _escape_for_generated(self.name)
            return _generated_label(label)
        else:
            return self.name
    def label(self, name):
        # label(None) is a no-op for a plain column
        if name is None:
            return self
        else:
            return super(ColumnClause, self).label(name)
    @property
    def _from_objects(self):
        if self.table is not None:
            return [self.table]
        else:
            return []
    def _bind_param(self, obj):
        return _BindParamClause(self.name, obj, _fallback_type=self.type, unique=True)
    def _make_proxy(self, selectable, name=None, attach=True):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = ColumnClause(
                    name or self.name,
                    selectable=selectable,
                    type_=self.type,
                    is_literal=is_literal
                )
        c.proxies = [self]
        if attach:
            selectable.columns[c.name] = c
        return c
class TableClause(_Immutable, FromClause):
    """Represents a "table" construct.
    Note that this represents tables only as another syntactical
    construct within SQL expressions; it does not provide schema-level
    functionality.
    """
    __visit_name__ = 'table'
    named_with_column = True
    def __init__(self, name, *columns):
        super(TableClause, self).__init__()
        self.name = self.fullname = name
        self._columns = ColumnCollection()
        self._primary_key = ColumnSet()
        self._foreign_keys = set()
        for c in columns:
            self.append_column(c)
    def _export_columns(self):
        # columns are attached eagerly via append_column(); the lazy
        # export mechanism of FromClause does not apply here
        raise NotImplementedError()
    @util.memoized_property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    def append_column(self, c):
        # register the column and point it back at this table
        self._columns[c.name] = c
        c.table = self
    def get_children(self, column_collections=True, **kwargs):
        if column_collections:
            return [c for c in self.c]
        else:
            return []
    def count(self, whereclause=None, **params):
        """return a SELECT COUNT generated against this :class:`TableClause`."""
        # count against the first primary key column when present,
        # otherwise the first column
        if self.primary_key:
            col = list(self.primary_key)[0]
        else:
            col = list(self.columns)[0]
        return select(
                    [func.count(col).label('tbl_row_count')],
                    whereclause,
                    from_obj=[self],
                    **params)
    def insert(self, values=None, inline=False, **kwargs):
        """Generate an :func:`insert()` construct."""
        return insert(self, values=values, inline=inline, **kwargs)
    def update(self, whereclause=None, values=None, inline=False, **kwargs):
        """Generate an :func:`update()` construct."""
        return update(self, whereclause=whereclause,
                        values=values, inline=inline, **kwargs)
    def delete(self, whereclause=None, **kwargs):
        """Generate a :func:`delete()` construct."""
        return delete(self, whereclause, **kwargs)
    @property
    def _from_objects(self):
        return [self]
class _SelectBaseMixin(Executable):
"""Base class for :class:`Select` and ``CompoundSelects``."""
def __init__(self,
use_labels=False,
for_update=False,
limit=None,
offset=None,
order_by=None,
group_by=None,
bind=None,
autocommit=None):
self.use_labels = use_labels
self.for_update = for_update
if autocommit is not None:
util.warn_deprecated("autocommit on select() is deprecated. "
"Use .execution_options(autocommit=True)")
self._execution_options = self._execution_options.union({'autocommit':autocommit})
self._limit = limit
self._offset = offset
self._bind = bind
self._order_by_clause = ClauseList(*util.to_list(order_by) or [])
self._group_by_clause = ClauseList(*util.to_list(group_by) or [])
def as_scalar(self):
"""return a 'scalar' representation of this selectable, which can be
used as a column expression.
Typically, a select statement which has only one column in its columns
clause is eligible to be used as a scalar expression.
The returned object is an instance of
:class:`_ScalarSelect`.
"""
return _ScalarSelect(self)
@_generative
def apply_labels(self):
"""return a new selectable with the 'use_labels' flag set to True.
This will result in column expressions being generated using labels
against their table name, such as "SELECT somecolumn AS
tablename_somecolumn". This allows selectables which contain multiple
FROM clauses to produce a unique set of column names regardless of
name conflicts among the individual FROM clauses.
"""
self.use_labels = True
def label(self, name):
"""return a 'scalar' representation of this selectable, embedded as a
subquery with a label.
See also ``as_scalar()``.
"""
return self.as_scalar().label(name)
@_generative
@util.deprecated(message="autocommit() is deprecated. "
"Use .execution_options(autocommit=True)")
def autocommit(self):
"""return a new selectable with the 'autocommit' flag set to True."""
self._execution_options = self._execution_options.union({'autocommit':True})
def _generate(self):
"""Override the default _generate() method to also clear out exported collections."""
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
s._reset_exported()
return s
@_generative
def limit(self, limit):
"""return a new selectable with the given LIMIT criterion applied."""
self._limit = limit
@_generative
def offset(self, offset):
"""return a new selectable with the given OFFSET criterion applied."""
self._offset = offset
@_generative
def order_by(self, *clauses):
"""return a new selectable with the given list of ORDER BY criterion applied.
The criterion will be appended to any pre-existing ORDER BY criterion.
"""
self.append_order_by(*clauses)
@_generative
def group_by(self, *clauses):
"""return a new selectable with the given list of GROUP BY criterion applied.
The criterion will be appended to any pre-existing GROUP BY criterion.
"""
self.append_group_by(*clauses)
def append_order_by(self, *clauses):
"""Append the given ORDER BY criterion applied to this selectable.
The criterion will be appended to any pre-existing ORDER BY criterion.
"""
if len(clauses) == 1 and clauses[0] is None:
self._order_by_clause = ClauseList()
else:
if getattr(self, '_order_by_clause', None) is not None:
clauses = list(self._order_by_clause) + list(clauses)
self._order_by_clause = ClauseList(*clauses)
def append_group_by(self, *clauses):
"""Append the given GROUP BY criterion applied to this selectable.
The criterion will be appended to any pre-existing GROUP BY criterion.
"""
if len(clauses) == 1 and clauses[0] is None:
self._group_by_clause = ClauseList()
else:
if getattr(self, '_group_by_clause', None) is not None:
clauses = list(self._group_by_clause) + list(clauses)
self._group_by_clause = ClauseList(*clauses)
    @property
    def _from_objects(self):
        # A select statement is itself the FROM object it contributes to an
        # enclosing statement.
        return [self]
class _ScalarSelect(_Grouping):
    """A select() embedded in a column-level expression context.

    Created via ``as_scalar()``; behaves as a single column expression
    whose type is that of the select's first column.
    """
    # a scalar select contributes no FROM objects to an enclosing statement.
    _from_objects = []
    def __init__(self, element):
        self.element = element
        cols = list(element.c)
        # the expression's type is taken from the select's first column.
        self.type = cols[0].type
    @property
    def columns(self):
        raise exc.InvalidRequestError("Scalar Select expression has no columns; "
                    "use this object directly within a column-level expression.")
    c = columns
    def self_group(self, **kwargs):
        # already parenthesized via _Grouping; grouping again is a no-op.
        return self
    def _make_proxy(self, selectable, name):
        # proxy only the first inner column, matching the scalar contract.
        return list(self.inner_columns)[0]._make_proxy(selectable, name)
class CompoundSelect(_SelectBaseMixin, FromClause):
    """Forms the basis of ``UNION``, ``UNION ALL``, and other
    SELECT-based set operations."""
    __visit_name__ = 'compound_select'
    def __init__(self, keyword, *selects, **kwargs):
        # keyword is the set-operation keyword, e.g. 'UNION' or 'EXCEPT ALL'.
        self._should_correlate = kwargs.pop('correlate', False)
        self.keyword = keyword
        self.selects = []
        numcols = None
        # some DBs do not like ORDER BY in the inner queries of a UNION, etc.
        for n, s in enumerate(selects):
            s = _clause_element_as_expr(s)
            if not numcols:
                numcols = len(s.c)
            elif len(s.c) != numcols:
                raise exc.ArgumentError(
                    "All selectables passed to CompoundSelect must "
                    "have identical numbers of columns; select #%d has %d columns,"
                    " select #%d has %d" %
                    (1, len(self.selects[0].c), n+1, len(s.c))
                )
            self.selects.append(s.self_group(self))
        _SelectBaseMixin.__init__(self, **kwargs)
    def self_group(self, against=None):
        # a compound select is always parenthesized when embedded.
        return _FromGrouping(self)
    def is_derived_from(self, fromclause):
        """Return True if any member select derives from ``fromclause``."""
        for s in self.selects:
            if s.is_derived_from(fromclause):
                return True
        return False
    def _populate_column_collection(self):
        for cols in zip(*[s.c for s in self.selects]):
            # this is a slightly hacky thing - the union exports a column that
            # resembles just that of the *first* selectable. to get at a "composite" column,
            # particularly foreign keys, you have to dig through the proxies collection
            # which we generate below. We may want to improve upon this,
            # such as perhaps _make_proxy can accept a list of other columns that
            # are "shared" - schema.column can then copy all the ForeignKeys in.
            # this would allow the union() to have all those fks too.
            proxy = cols[0]._make_proxy(
                self, name=self.use_labels and cols[0]._label or None)
            # hand-construct the "proxies" collection to include all derived columns
            # place a 'weight' annotation corresponding to how low in the list of
            # select()s the column occurs, so that the corresponding_column() operation
            # can resolve conflicts
            proxy.proxies = [c._annotate({'weight':i + 1}) for i, c in enumerate(cols)]
    def _copy_internals(self, clone=_clone):
        # clone member selects and any ORDER BY / GROUP BY clauses in place.
        self._reset_exported()
        self.selects = [clone(s) for s in self.selects]
        if hasattr(self, '_col_map'):
            del self._col_map
        for attr in ('_order_by_clause', '_group_by_clause'):
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr)))
    def get_children(self, column_collections=True, **kwargs):
        """Return child elements as per the ClauseElement specification."""
        return (column_collections and list(self.c) or []) + \
            [self._order_by_clause, self._group_by_clause] + list(self.selects)
    def bind(self):
        # fall back to the first member select that carries a bind.
        if self._bind:
            return self._bind
        for s in self.selects:
            e = s.bind
            if e:
                return e
        else:
            return None
    def _set_bind(self, bind):
        self._bind = bind
    bind = property(bind, _set_bind)
class Select(_SelectBaseMixin, FromClause):
    """Represents a ``SELECT`` statement.
    Select statements support appendable clauses, as well as the
    ability to execute themselves and return a result set.
    """
    __visit_name__ = 'select'
    _prefixes = ()
    def __init__(self,
                 columns,
                 whereclause=None,
                 from_obj=None,
                 distinct=False,
                 having=None,
                 correlate=True,
                 prefixes=None,
                 **kwargs):
        """Construct a Select object.
        The public constructor for Select is the
        :func:`select` function; see that function for
        argument descriptions.
        Additional generative and mutator methods are available on the
        :class:`_SelectBaseMixin` superclass.
        """
        self._should_correlate = correlate
        self._distinct = distinct
        self._correlate = set()
        self._froms = util.OrderedSet()
        try:
            cols_present = bool(columns)
        except TypeError:
            raise exc.ArgumentError("columns argument to select() must "
                    "be a Python list or other iterable")
        if cols_present:
            self._raw_columns = []
            for c in columns:
                c = _literal_as_column(c)
                # scalar selects in the columns clause must be parenthesized.
                if isinstance(c, _ScalarSelect):
                    c = c.self_group(against=operators.comma_op)
                self._raw_columns.append(c)
            # FROM objects are derived from the column expressions.
            self._froms.update(_from_objects(*self._raw_columns))
        else:
            self._raw_columns = []
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
            self._froms.update(_from_objects(self._whereclause))
        else:
            self._whereclause = None
        if from_obj is not None:
            for f in util.to_list(from_obj):
                # plain strings become literal text FROM clauses.
                if _is_literal(f):
                    self._froms.add(_TextClause(f))
                else:
                    self._froms.add(f)
        if having is not None:
            self._having = _literal_as_text(having)
        else:
            self._having = None
        if prefixes:
            self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
        _SelectBaseMixin.__init__(self, **kwargs)
    def _get_display_froms(self, existing_froms=None):
        """Return the full list of 'from' clauses to be displayed.
        Takes into account a set of existing froms which may be
        rendered in the FROM clause of enclosing selects; this Select
        may want to leave those absent if it is automatically
        correlating.
        """
        froms = self._froms
        # NOTE(review): itertools.chain objects are always truthy, so the
        # following check always passes; difference() of an empty iterator
        # is a no-op, so behavior is unaffected.
        toremove = itertools.chain(*[f._hide_froms for f in froms])
        if toremove:
            froms = froms.difference(toremove)
        if len(froms) > 1 or self._correlate:
            if self._correlate:
                froms = froms.difference(_cloned_intersection(froms, self._correlate))
            if self._should_correlate and existing_froms:
                froms = froms.difference(_cloned_intersection(froms, existing_froms))
            if not len(froms):
                raise exc.InvalidRequestError(
                    "Select statement '%s' returned no FROM clauses "
                    "due to auto-correlation; specify correlate(<tables>) "
                    "to control correlation manually." % self)
        return froms
    @property
    def froms(self):
        """Return the displayed list of FromClause elements."""
        return self._get_display_froms()
    @property
    def type(self):
        raise exc.InvalidRequestError("Select objects don't have a type. "
                    "Call as_scalar() on this Select object "
                    "to return a 'scalar' version of this Select.")
    @util.memoized_instancemethod
    def locate_all_froms(self):
        """return a Set of all FromClause elements referenced by this Select.
        This set is a superset of that returned by the ``froms`` property, which
        is specifically for those FromClause elements that would actually be rendered.
        """
        return self._froms.union(_from_objects(*list(self._froms)))
    @property
    def inner_columns(self):
        """an iterator of all ColumnElement expressions which would
        be rendered into the columns clause of the resulting SELECT statement.
        """
        return _select_iterables(self._raw_columns)
    def is_derived_from(self, fromclause):
        """Return True if this select derives from the given FromClause."""
        if self in fromclause._cloned_set:
            return True
        for f in self.locate_all_froms():
            if f.is_derived_from(fromclause):
                return True
        return False
    def _copy_internals(self, clone=_clone):
        # clone every referenced FROM object exactly once, then rebuild
        # _froms/_correlate from the mapping so shared clones stay shared.
        self._reset_exported()
        from_cloned = dict((f, clone(f))
                           for f in self._froms.union(self._correlate))
        self._froms = util.OrderedSet(from_cloned[f] for f in self._froms)
        self._correlate = set(from_cloned[f] for f in self._correlate)
        self._raw_columns = [clone(c) for c in self._raw_columns]
        for attr in ('_whereclause', '_having', '_order_by_clause', '_group_by_clause'):
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr)))
    def get_children(self, column_collections=True, **kwargs):
        """return child elements as per the ClauseElement specification."""
        return (column_collections and list(self.columns) or []) + \
            self._raw_columns + list(self._froms) + \
            [x for x in
                (self._whereclause, self._having,
                    self._order_by_clause, self._group_by_clause)
            if x is not None]
    @_generative
    def column(self, column):
        """return a new select() construct with the given column expression
        added to its columns clause.
        """
        column = _literal_as_column(column)
        if isinstance(column, _ScalarSelect):
            column = column.self_group(against=operators.comma_op)
        self._raw_columns = self._raw_columns + [column]
        self._froms = self._froms.union(_from_objects(column))
    @_generative
    def with_only_columns(self, columns):
        """return a new select() construct with its columns clause replaced
        with the given columns.
        """
        self._raw_columns = [
                isinstance(c, _ScalarSelect) and
                c.self_group(against=operators.comma_op) or c
                for c in [_literal_as_column(c) for c in columns]
            ]
    @_generative
    def where(self, whereclause):
        """return a new select() construct with the given expression added to its
        WHERE clause, joined to the existing clause via AND, if any.
        """
        self.append_whereclause(whereclause)
    @_generative
    def having(self, having):
        """return a new select() construct with the given expression added to its HAVING
        clause, joined to the existing clause via AND, if any.
        """
        self.append_having(having)
    @_generative
    def distinct(self):
        """return a new select() construct which will apply DISTINCT to its columns
        clause.
        """
        self._distinct = True
    @_generative
    def prefix_with(self, clause):
        """return a new select() construct which will apply the given expression to the
        start of its columns clause, not using any commas.
        """
        clause = _literal_as_text(clause)
        self._prefixes = self._prefixes + (clause,)
    @_generative
    def select_from(self, fromclause):
        """return a new select() construct with the given FROM expression applied to its
        list of FROM objects.
        """
        fromclause = _literal_as_text(fromclause)
        self._froms = self._froms.union([fromclause])
    @_generative
    def correlate(self, *fromclauses):
        """return a new select() construct which will correlate the given FROM clauses to
        that of an enclosing select(), if a match is found.
        By "match", the given fromclause must be present in this select's list of FROM
        objects and also present in an enclosing select's list of FROM objects.
        Calling this method turns off the select's default behavior of
        "auto-correlation". Normally, select() auto-correlates all of its FROM clauses to
        those of an embedded select when compiled.
        If the fromclause is None, correlation is disabled for the returned select().
        """
        self._should_correlate = False
        if fromclauses == (None,):
            self._correlate = set()
        else:
            self._correlate = self._correlate.union(fromclauses)
    def append_correlation(self, fromclause):
        """append the given correlation expression to this select() construct."""
        self._should_correlate = False
        self._correlate = self._correlate.union([fromclause])
    def append_column(self, column):
        """append the given column expression to the columns clause of this select()
        construct.
        """
        column = _literal_as_column(column)
        if isinstance(column, _ScalarSelect):
            column = column.self_group(against=operators.comma_op)
        self._raw_columns = self._raw_columns + [column]
        self._froms = self._froms.union(_from_objects(column))
        # exported column collections are stale once columns change.
        self._reset_exported()
    def append_prefix(self, clause):
        """append the given columns clause prefix expression to this select()
        construct.
        """
        clause = _literal_as_text(clause)
        self._prefixes = self._prefixes + (clause,)
    def append_whereclause(self, whereclause):
        """append the given expression to this select() construct's WHERE criterion.
        The expression will be joined to existing WHERE criterion via AND.
        """
        whereclause = _literal_as_text(whereclause)
        self._froms = self._froms.union(_from_objects(whereclause))
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause, whereclause)
        else:
            self._whereclause = whereclause
    def append_having(self, having):
        """append the given expression to this select() construct's HAVING criterion.
        The expression will be joined to existing HAVING criterion via AND.
        """
        if self._having is not None:
            self._having = and_(self._having, _literal_as_text(having))
        else:
            self._having = _literal_as_text(having)
    def append_from(self, fromclause):
        """append the given FromClause expression to this select() construct's FROM
        clause.
        """
        if _is_literal(fromclause):
            fromclause = _TextClause(fromclause)
        self._froms = self._froms.union([fromclause])
    def __exportable_columns(self):
        # yield each individual ColumnElement destined for the exported
        # column collection, expanding selectables into their columns.
        for column in self._raw_columns:
            if isinstance(column, Selectable):
                for co in column.columns:
                    yield co
            elif isinstance(column, ColumnElement):
                yield column
            else:
                continue
    def _populate_column_collection(self):
        for c in self.__exportable_columns():
            c._make_proxy(self, name=self.use_labels and c._label or None)
    def self_group(self, against=None):
        """return a 'grouping' construct as per the ClauseElement specification.
        This produces an element that can be embedded in an expression. Note that
        this method is called automatically as needed when constructing expressions.
        """
        if isinstance(against, CompoundSelect):
            return self
        return _FromGrouping(self)
    def union(self, other, **kwargs):
        """return a SQL UNION of this select() construct against the given selectable."""
        return union(self, other, **kwargs)
    def union_all(self, other, **kwargs):
        """return a SQL UNION ALL of this select() construct against the given
        selectable.
        """
        return union_all(self, other, **kwargs)
    def except_(self, other, **kwargs):
        """return a SQL EXCEPT of this select() construct against the given selectable."""
        return except_(self, other, **kwargs)
    def except_all(self, other, **kwargs):
        """return a SQL EXCEPT ALL of this select() construct against the given
        selectable.
        """
        return except_all(self, other, **kwargs)
    def intersect(self, other, **kwargs):
        """return a SQL INTERSECT of this select() construct against the given
        selectable.
        """
        return intersect(self, other, **kwargs)
    def intersect_all(self, other, **kwargs):
        """return a SQL INTERSECT ALL of this select() construct against the given
        selectable.
        """
        return intersect_all(self, other, **kwargs)
    def bind(self):
        # with no FROM objects, derive the bind from the raw columns;
        # otherwise take the first FROM object's bind. Result is cached.
        if self._bind:
            return self._bind
        if not self._froms:
            for c in self._raw_columns:
                e = c.bind
                if e:
                    self._bind = e
                    return e
        else:
            e = list(self._froms)[0].bind
            if e:
                self._bind = e
                return e
        return None
    def _set_bind(self, bind):
        self._bind = bind
    bind = property(bind, _set_bind)
class _UpdateBase(Executable, ClauseElement):
    """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements."""
    __visit_name__ = 'update_base'
    # DML statements imply autocommit when executed without a transaction.
    _execution_options = Executable._execution_options.union({'autocommit':True})
    kwargs = util.frozendict()
    def _process_colparams(self, parameters):
        # normalize a positional list/tuple of values into a dict keyed on
        # the target table's column keys; dicts pass through unchanged.
        if isinstance(parameters, (list, tuple)):
            pp = {}
            for i, c in enumerate(self.table.c):
                pp[c.key] = parameters[i]
            return pp
        else:
            return parameters
    def params(self, *arg, **kw):
        """Raise; bound-parameter rebinding does not apply to DML."""
        raise NotImplementedError(
            "params() is not supported for INSERT/UPDATE/DELETE statements."
            " To set the values for an INSERT or UPDATE statement, use"
            " stmt.values(**parameters).")
    def bind(self):
        # fall back to the target table's bind when none is set directly.
        return self._bind or self.table.bind
    def _set_bind(self, bind):
        self._bind = bind
    bind = property(bind, _set_bind)
    # matches deprecated dialect-prefixed keyword arguments such as
    # 'firebird_returning', 'postgres_returning', 'postgresql_returning'.
    _returning_re = re.compile(r'(?:firebird|postgres(?:ql)?)_returning')
    def _process_deprecated_kw(self, kwargs):
        # convert deprecated dialect-prefixed 'returning' keywords into the
        # modern _returning attribute, warning the caller.
        for k in list(kwargs):
            m = self._returning_re.match(k)
            if m:
                self._returning = kwargs.pop(k)
                util.warn_deprecated(
                    "The %r argument is deprecated. Please "
                    "use statement.returning(col1, col2, ...)" % k
                )
        return kwargs
    @_generative
    def returning(self, *cols):
        """Add a RETURNING or equivalent clause to this statement.
        The given list of columns represent columns within the table
        that is the target of the INSERT, UPDATE, or DELETE. Each
        element can be any column expression. :class:`~sqlalchemy.schema.Table` objects
        will be expanded into their individual columns.
        Upon compilation, a RETURNING clause, or database equivalent,
        will be rendered within the statement. For INSERT and UPDATE,
        the values are the newly inserted/updated values. For DELETE,
        the values are those of the rows which were deleted.
        Upon execution, the values of the columns to be returned
        are made available via the result set and can be iterated
        using ``fetchone()`` and similar. For DBAPIs which do not
        natively support returning values (i.e. cx_oracle),
        SQLAlchemy will approximate this behavior at the result level
        so that a reasonable amount of behavioral neutrality is
        provided.
        Note that not all databases/DBAPIs
        support RETURNING. For those backends with no support,
        an exception is raised upon compilation and/or execution.
        For those who do support it, the functionality across backends
        varies greatly, including restrictions on executemany()
        and other statements which return multiple rows. Please
        read the documentation notes for the database in use in
        order to determine the availability of RETURNING.
        """
        self._returning = cols
class _ValuesBase(_UpdateBase):
    """Supplies support for :meth:`values` to INSERT and UPDATE constructs."""
    __visit_name__ = 'values_base'
    def __init__(self, table, values):
        self.table = table
        self.parameters = self._process_colparams(values)
    @_generative
    def values(self, *args, **kwargs):
        """specify the VALUES clause for an INSERT statement, or the SET clause for an
        UPDATE.

        ``**kwargs``
          key=<somevalue> arguments

        ``*args``
          A single dictionary can be sent as the first positional argument. This
          allows non-string based keys, such as Column objects, to be used.
        """
        if args:
            v = args[0]
        else:
            v = {}
        if self.parameters is None:
            self.parameters = self._process_colparams(v)
            self.parameters.update(kwargs)
        else:
            # work on a copy so the pre-@_generative original is untouched.
            self.parameters = self.parameters.copy()
            self.parameters.update(self._process_colparams(v))
            self.parameters.update(kwargs)
class Insert(_ValuesBase):
    """Represent an INSERT construct.
    The :class:`Insert` object is created using the :func:`insert()` function.
    """
    __visit_name__ = 'insert'
    _prefixes = ()
    def __init__(self,
                 table,
                 values=None,
                 inline=False,
                 bind=None,
                 prefixes=None,
                 returning=None,
                 **kwargs):
        # see the standalone insert() function for parameter descriptions.
        _ValuesBase.__init__(self, table, values)
        self._bind = bind
        self.select = None
        self.inline = inline
        self._returning = returning
        if prefixes:
            self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
        if kwargs:
            # remaining kwargs may be deprecated dialect-prefixed
            # '<dialect>_returning' arguments; convert them.
            self.kwargs = self._process_deprecated_kw(kwargs)
    def get_children(self, **kwargs):
        # the only child element is an INSERT..SELECT's select, if present.
        if self.select is not None:
            return self.select,
        else:
            return ()
    def _copy_internals(self, clone=_clone):
        # TODO: coverage
        self.parameters = self.parameters.copy()
    @_generative
    def prefix_with(self, clause):
        """Add a word or expression between INSERT and INTO. Generative.
        If multiple prefixes are supplied, they will be separated with
        spaces.
        """
        clause = _literal_as_text(clause)
        self._prefixes = self._prefixes + (clause,)
class Update(_ValuesBase):
    """Represent an Update construct.
    The :class:`Update` object is created using the :func:`update()` function.
    """
    __visit_name__ = 'update'
    def __init__(self,
                 table,
                 whereclause,
                 values=None,
                 inline=False,
                 bind=None,
                 returning=None,
                 **kwargs):
        # see the standalone update() function for parameter descriptions.
        _ValuesBase.__init__(self, table, values)
        self._bind = bind
        self._returning = returning
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None
        self.inline = inline
        if kwargs:
            # remaining kwargs may be deprecated dialect-prefixed
            # '<dialect>_returning' arguments; convert them.
            self.kwargs = self._process_deprecated_kw(kwargs)
    def get_children(self, **kwargs):
        # the only child element is the WHERE clause, if present.
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()
    def _copy_internals(self, clone=_clone):
        # TODO: coverage
        # NOTE(review): clone(None) would raise if this UPDATE has no WHERE
        # clause -- confirm whether traversal can reach that state.
        self._whereclause = clone(self._whereclause)
        self.parameters = self.parameters.copy()
    @_generative
    def where(self, whereclause):
        """return a new update() construct with the given expression added to its WHERE
        clause, joined to the existing clause via AND, if any.
        """
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause, _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)
class Delete(_UpdateBase):
    """Represent a DELETE construct.
    The :class:`Delete` object is created using the :func:`delete()` function.
    """
    __visit_name__ = 'delete'
    def __init__(self,
                 table,
                 whereclause,
                 bind=None,
                 returning=None,
                 **kwargs):
        """Construct a Delete object.

        :param table: the table to delete rows from.
        :param whereclause: optional WHERE criterion, or None for all rows.
        :param bind: optional Engine or Connection for implicit execution.
        :param returning: optional sequence of columns for RETURNING.
        """
        self._bind = bind
        self.table = table
        self._returning = returning
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None
        if kwargs:
            # remaining kwargs may be deprecated dialect-prefixed
            # '<dialect>_returning' arguments; convert them.
            self.kwargs = self._process_deprecated_kw(kwargs)
    def get_children(self, **kwargs):
        # the only child element is the WHERE clause, if present.
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()
    @_generative
    def where(self, whereclause):
        """Add the given WHERE clause to a newly returned delete construct.
        The expression is joined to any existing WHERE criterion via AND.
        """
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause, _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)
    def _copy_internals(self, clone=_clone):
        # TODO: coverage
        # guard: a DELETE with no WHERE clause has nothing to clone here;
        # the previous unconditional clone(None) would raise AttributeError.
        if self._whereclause is not None:
            self._whereclause = clone(self._whereclause)
class _IdentifiedClause(Executable, ClauseElement):
    """Base for statements identified by a name, i.e. savepoint operations."""
    __visit_name__ = 'identified'
    # savepoint operations must not trigger autocommit.
    _execution_options = Executable._execution_options.union({'autocommit':False})
    quote = None
    def __init__(self, ident):
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    """Represent a SAVEPOINT statement."""
    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    """Represent a ROLLBACK TO SAVEPOINT statement."""
    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    """Represent a RELEASE SAVEPOINT statement."""
    __visit_name__ = 'release_savepoint'
| 33.734289 | 115 | 0.622817 |
import itertools, re
from operator import attrgetter
from sqlalchemy import util, exc
from sqlalchemy.sql import operators
from sqlalchemy.sql.visitors import Visitable, cloned_traverse
import operator
# Placeholders for modules/classes imported lazily at first use, to avoid
# circular imports between the sql package's modules.
functions, schema, sql_util, sqltypes = None, None, None, None
DefaultDialect, ClauseAdapter, Annotated = None, None, None
# Public API of this module.
__all__ = [
    'Alias', 'ClauseElement',
    'ColumnCollection', 'ColumnElement',
    'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join',
    'Select', 'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc',
    'between', 'bindparam', 'case', 'cast', 'column', 'delete',
    'desc', 'distinct', 'except_', 'except_all', 'exists', 'extract', 'func',
    'modifier', 'collate',
    'insert', 'intersect', 'intersect_all', 'join', 'label', 'literal',
    'literal_column', 'not_', 'null', 'or_', 'outparam', 'outerjoin', 'select',
    'subquery', 'table', 'text', 'tuple_', 'union', 'union_all', 'update', ]
# Sentinel symbol used with execution_options(autocommit=...).
PARSE_AUTOCOMMIT = util._symbol('PARSE_AUTOCOMMIT')
def desc(column):
    """Return a descending ``ORDER BY`` clause element for the given column."""
    return _UnaryExpression(column, modifier=operators.desc_op)
def asc(column):
    """Return an ascending ``ORDER BY`` clause element for the given column."""
    return _UnaryExpression(column, modifier=operators.asc_op)
def outerjoin(left, right, onclause=None):
    """Return an OUTER JOIN clause element, joining ``left`` to ``right``."""
    return Join(left, right, onclause, isouter=True)
def join(left, right, onclause=None, isouter=False):
    """Return a JOIN clause element (regular inner join by default)."""
    return Join(left, right, onclause, isouter)
def select(columns=None, whereclause=None, from_obj=None, **kwargs):
    """Return a new :class:`Select` construct.

    :param columns: iterable of column or selectable expressions for the
      columns clause.
    :param whereclause: optional WHERE criterion.
    :param from_obj: optional list of explicit FROM objects.  The default
      was previously a shared mutable ``[]``; ``None`` is now used as the
      sentinel and replaced with a fresh list per call, which is equivalent
      for :class:`Select` but avoids exposing a shared mutable default.
    :param kwargs: additional keyword arguments passed to :class:`Select`.
    """
    if from_obj is None:
        from_obj = []
    return Select(columns, whereclause=whereclause, from_obj=from_obj, **kwargs)
def subquery(alias, *args, **kwargs):
    """Return a named subquery: a :class:`Select` wrapped in an Alias."""
    return Select(*args, **kwargs).alias(alias)
def insert(table, values=None, inline=False, **kwargs):
    """Return an :class:`Insert` construct against the given table."""
    return Insert(table, values, inline=inline, **kwargs)
def update(table, whereclause=None, values=None, inline=False, **kwargs):
    """Return an :class:`Update` construct against the given table."""
    return Update(
            table,
            whereclause=whereclause,
            values=values,
            inline=inline,
            **kwargs)
def delete(table, whereclause = None, **kwargs):
    """Return a :class:`Delete` construct against the given table."""
    return Delete(table, whereclause, **kwargs)
def and_(*clauses):
    """Join the given clauses together using the AND operator."""
    # a single clause is returned as-is, not wrapped.
    if len(clauses) == 1:
        return clauses[0]
    return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
    """Join the given clauses together using the OR operator."""
    # a single clause is returned as-is, not wrapped.
    if len(clauses) == 1:
        return clauses[0]
    return BooleanClauseList(operator=operators.or_, *clauses)
def not_(clause):
    """Return a negation of the given clause (the NOT operator)."""
    return operators.inv(_literal_as_binds(clause))
def distinct(expr):
    """Return a DISTINCT unary expression against the given expression."""
    expr = _literal_as_binds(expr)
    return _UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type)
def between(ctest, cleft, cright):
    """Return a BETWEEN predicate: ``ctest BETWEEN cleft AND cright``."""
    ctest = _literal_as_binds(ctest)
    return ctest.between(cleft, cright)
def case(whens, value=None, else_=None):
    """Produce a CASE statement from the given when/then pairs."""
    return _Case(whens, value=value, else_=else_)
def cast(clause, totype, **kwargs):
    """Return a CAST function: ``CAST(clause AS totype)``."""
    return _Cast(clause, totype, **kwargs)
def extract(field, expr):
    """Return the EXTRACT function: ``EXTRACT(field FROM expr)``."""
    return _Extract(field, expr)
def collate(expression, collation):
    """Return the clause ``expression COLLATE collation``."""
    expr = _literal_as_binds(expression)
    return _BinaryExpression(
        expr,
        _literal_as_text(collation),
        operators.collate, type_=expr.type)
def exists(*args, **kwargs):
    """Return an EXISTS clause as applied to a Select object."""
    return _Exists(*args, **kwargs)
def union(*selects, **kwargs):
    """Return a UNION of multiple selectables."""
    return _compound_select('UNION', *selects, **kwargs)
def union_all(*selects, **kwargs):
    """Return a UNION ALL of multiple selectables."""
    return _compound_select('UNION ALL', *selects, **kwargs)
def except_(*selects, **kwargs):
    """Return an EXCEPT of multiple selectables."""
    return _compound_select('EXCEPT', *selects, **kwargs)
def except_all(*selects, **kwargs):
    """Return an EXCEPT ALL of multiple selectables."""
    return _compound_select('EXCEPT ALL', *selects, **kwargs)
def intersect(*selects, **kwargs):
    """Return an INTERSECT of multiple selectables."""
    return _compound_select('INTERSECT', *selects, **kwargs)
def intersect_all(*selects, **kwargs):
    """Return an INTERSECT ALL of multiple selectables."""
    return _compound_select('INTERSECT ALL', *selects, **kwargs)
def alias(selectable, alias=None):
    """Return an :class:`Alias` of the given selectable."""
    return Alias(selectable, alias=alias)
def literal(value, type_=None):
    """Return a literal clause, bound to a bind parameter."""
    return _BindParamClause(None, value, type_=type_, unique=True)
def tuple_(*expr):
    """Return a SQL tuple, e.g. for composite IN comparisons."""
    return _Tuple(*expr)
def label(name, obj):
    """Return a :class:`_Label` applying the given name to the element."""
    return _Label(name, obj)
def column(text, type_=None):
    """Return a textual column clause, as used in a columns clause."""
    return ColumnClause(text, type_=type_)
def literal_column(text, type_=None):
    """Return a column clause whose text is rendered exactly as given."""
    return ColumnClause(text, type_=type_, is_literal=True)
def table(name, *columns):
    """Return a :class:`TableClause` with the given name and columns."""
    return TableClause(name, *columns)
def bindparam(key, value=None, type_=None, unique=False, required=False):
    """Create a bind parameter clause with the given key."""
    # a ColumnClause key supplies both the name and the type.
    if isinstance(key, ColumnClause):
        return _BindParamClause(key.name, value, type_=key.type, unique=unique, required=required)
    else:
        return _BindParamClause(key, value, type_=type_, unique=unique, required=required)
def outparam(key, type_=None):
    """Create an OUT parameter for stored procedures supporting them."""
    return _BindParamClause(
                key, None, type_=type_, unique=False, isoutparam=True)
def text(text, bind=None, *args, **kwargs):
    """Create a literal SQL text fragment."""
    return _TextClause(text, bind=bind, *args, **kwargs)
def null():
    """Return a :class:`_Null` object, representing the SQL NULL keyword."""
    return _Null()
class _FunctionGenerator(object):
    """Generate :class:`Function` objects via attribute access (``func.``)."""
    def __init__(self, **opts):
        self.__names = []
        self.opts = opts
    def __getattr__(self, name):
        # dunder names are resolved against the instance dict only, so that
        # copy/pickle protocols are not mistaken for SQL function names.
        if name.startswith('__'):
            try:
                return self.__dict__[name]
            except KeyError:
                raise AttributeError(name)
        elif name.endswith('_'):
            # trailing underscore allows names shadowing Python keywords.
            name = name[0:-1]
        # each attribute access returns a new generator with the name
        # appended, supporting dotted package paths (func.pkg.fn).
        f = _FunctionGenerator(**self.opts)
        f.__names = list(self.__names) + [name]
        return f
    def __call__(self, *c, **kwargs):
        o = self.opts.copy()
        o.update(kwargs)
        if len(self.__names) == 1:
            # check for a known generic function matching the name.
            global functions
            if functions is None:
                from sqlalchemy.sql import functions
            func = getattr(functions, self.__names[-1].lower(), None)
            if func is not None:
                return func(*c, **o)
        return Function(
            self.__names[-1], packagenames=self.__names[0:-1], *c, **o)
# public singletons: func.<name>(...) and modifier.<name>(...).
func = _FunctionGenerator()
modifier = _FunctionGenerator(group=False)
class _generated_label(unicode):
    """A unicode subclass used to identify dynamically generated names.

    The class body is the docstring alone; without it the class statement
    would be syntactically invalid.
    """

def _escape_for_generated(x):
    """Escape literal '%' signs so the string survives paramstyle
    formatting; generated labels are already safe and pass through."""
    if isinstance(x, _generated_label):
        return x
    else:
        return x.replace('%', '%%')
def _clone(element):
    """Return a shallow clone of the given ClauseElement."""
    return element._clone()
def _expand_cloned(elements):
    """Expand the given set of ClauseElements to be the set of all 'cloned'
    predecessors."""
    return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
    """Expand tables into individual columns in the given list of column
    expressions."""
    return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
    """Return the intersection of sequences a and b, counting any overlap
    between 'cloned' predecessors as a match."""
    all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
    return set(elem for elem in a if all_overlap.intersection(elem._cloned_set))
def _compound_select(keyword, *selects, **kwargs):
    """Construct a CompoundSelect for the given set-operation keyword."""
    return CompoundSelect(keyword, *selects, **kwargs)
def _is_literal(element):
    """Return True if the element is a plain literal, not a clause element."""
    return not isinstance(element, Visitable) and \
        not hasattr(element, '__clause_element__')
def _from_objects(*elements):
    """Chain together the _from_objects of all given elements."""
    return itertools.chain(*[element._from_objects for element in elements])
def _labeled(element):
    """Ensure the element carries a name, applying an anonymous label if not."""
    if not hasattr(element, 'name'):
        return element.label(None)
    else:
        return element
def _column_as_key(element):
    """Resolve the given element (string or column) to a column key."""
    if isinstance(element, basestring):
        return element
    if hasattr(element, '__clause_element__'):
        element = element.__clause_element__()
    return element.key
def _literal_as_text(element):
    """Coerce the given element to a clause element, wrapping plain values
    in a literal text clause."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        return _TextClause(unicode(element))
    else:
        return element
def _clause_element_as_expr(element):
    """Unwrap an object's __clause_element__(), if present."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    else:
        return element
def _literal_as_column(element):
    """Coerce the given element to a column expression, wrapping plain
    values in a literal column."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        return literal_column(str(element))
    else:
        return element
def _literal_as_binds(element, name=None, type_=None):
    """Coerce the given element to a clause element, wrapping plain values
    in bind parameters (None becomes the NULL keyword)."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        if element is None:
            return null()
        else:
            return _BindParamClause(name, element, type_=type_, unique=True)
    else:
        return element
def _type_from_args(args):
    """Return the first non-NullType type among the given expressions,
    else NullType."""
    for a in args:
        if not isinstance(a.type, sqltypes.NullType):
            return a.type
    else:
        return sqltypes.NullType
def _no_literals(element):
    """Coerce to a clause element, rejecting ambiguous plain literals."""
    if hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif not isinstance(element, Visitable):
        raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' function "
                "to indicate a SQL expression literal, or 'literal()' to indicate a bound value." % element)
    else:
        return element
def _corresponding_column_or_error(fromclause, column, require_embedded=False):
    """Return fromclause's column corresponding to the given column, raising
    InvalidRequestError if no correspondence can be located."""
    c = fromclause.corresponding_column(column,
            require_embedded=require_embedded)
    if c is None:
        raise exc.InvalidRequestError(
                "Given column '%s', attached to table '%s', "
                "failed to locate a corresponding column from table '%s'"
                %
                (column,
                    getattr(column, 'table', None),fromclause.description)
                )
    return c
@util.decorator
def _generative(fn, *args, **kw):
    """Mark a method as generative: operate on a copy of self and return it."""
    self = args[0]._generate()
    fn(self, *args[1:], **kw)
    return self
def is_column(col):
    """True if ``col`` is an instance of :class:`ColumnElement`."""
    return isinstance(col, ColumnElement)
class ClauseElement(Visitable):
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
_bind = None
def _clone(self):
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
c.__dict__.pop('_cloned_set', None)
c._is_clone_of = self
return c
@util.memoized_property
def _cloned_set(self):
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = getattr(f, '_is_clone_of', None)
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
if util.jython:
def __hash__(self):
return id(self)
def _annotate(self, values):
global Annotated
if Annotated is None:
from sqlalchemy.sql.util import Annotated
return Annotated(self, values)
def _deannotate(self):
return self._clone()
def unique_params(self, *optionaldict, **kwargs):
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam':visit_bindparam})
def compare(self, other, **kw):
return self is other
def _copy_internals(self, clone=_clone):
pass
def get_children(self, **kwargs):
return []
def self_group(self, against=None):
return self
@property
def bind(self):
if self._bind is not None:
return self._bind
for f in _from_objects(self):
if f is self:
continue
engine = f.bind
if engine is not None:
return engine
else:
return None
def execute(self, *multiparams, **params):
e = self.bind
if e is None:
label = getattr(self, 'description', self.__class__.__name__)
msg = ('This %s is not bound and does not support direct '
'execution. Supply this statement to a Connection or '
'Engine for execution. Or, assign a bind to the statement '
'or the Metadata of its underlying tables to enable '
'implicit execution via this method.' % label)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
    def scalar(self, *multiparams, **params):
        """Compile and execute this ClauseElement, returning the scalar
        (first column of the first row) of the result."""
        return self.execute(*multiparams, **params).scalar()
    def compile(self, bind=None, dialect=None, **kw):
        """Compile this ClauseElement and return its Compiled object.

        Dialect resolution order: explicit ``dialect`` argument, the given
        ``bind``'s dialect, this clause's own bind, else a DefaultDialect.
        """
        if not dialect:
            if bind:
                dialect = bind.dialect
            elif self.bind:
                dialect = self.bind.dialect
                bind = self.bind
            else:
                # deferred import to avoid a circular dependency at load
                global DefaultDialect
                if DefaultDialect is None:
                    from sqlalchemy.engine.default import DefaultDialect
                dialect = DefaultDialect()
        compiler = self._compiler(dialect, bind=bind, **kw)
        compiler.compile()
        return compiler
    def _compiler(self, dialect, **kw):
        """Return a compiler appropriate for this ClauseElement, given a
        Dialect; overridden by DDL-oriented elements."""
        return dialect.statement_compiler(dialect, self, **kw)
    def __str__(self):
        # Py2: compile to unicode SQL, then downcast to ascii with escapes
        return unicode(self.compile()).encode('ascii', 'backslashreplace')
    def __and__(self, other):
        """``&`` produces an AND of the two clauses."""
        return and_(self, other)
    def __or__(self, other):
        """``|`` produces an OR of the two clauses."""
        return or_(self, other)
    def __invert__(self):
        """``~`` produces the negation of this clause."""
        return self._negate()
    def __nonzero__(self):
        # Py2 truth hook: clause elements have no defined boolean value
        raise TypeError("Boolean value of this clause is not defined")
    def _negate(self):
        """Return the logical negation of this clause: the precomputed
        negation_clause when present, else a NOT unary expression."""
        if hasattr(self, 'negation_clause'):
            return self.negation_clause
        else:
            return _UnaryExpression(
                        self.self_group(against=operators.inv),
                        operator=operators.inv,
                        negate=None)
    def __repr__(self):
        friendly = getattr(self, 'description', None)
        if friendly is None:
            return object.__repr__(self)
        else:
            # module/class/id plus the human-readable description
            return '<%s.%s at 0x%x; %s>' % (
                self.__module__, self.__class__.__name__, id(self), friendly)
class _Immutable(object):
    """Mixin marking an expression construct as non-copyable."""

    def _clone(self):
        # an immutable construct serves as its own clone
        return self

    def params(self, *optionaldict, **kwargs):
        raise NotImplementedError("Immutable objects do not support copying")

    def unique_params(self, *optionaldict, **kwargs):
        raise NotImplementedError("Immutable objects do not support copying")
class Operators(object):
    """Base of comparison and logical operators.

    Each operator delegates to ``operate()``/``reverse_operate()``, which
    subclasses implement to perform the actual dispatch.
    """

    def __and__(self, other):
        return self.operate(operators.and_, other)

    def __or__(self, other):
        return self.operate(operators.or_, other)

    def __invert__(self):
        return self.operate(operators.inv)

    def op(self, opstring):
        """produce a generic operator function for the given string."""
        def op(b):
            return self.operate(operators.op, opstring, b)
        return op

    def operate(self, op, *other, **kwargs):
        # abstract; subclasses apply 'op' to self and 'other'
        raise NotImplementedError(str(op))

    def reverse_operate(self, op, other, **kwargs):
        # abstract; reversed-operand form (e.g. __radd__)
        raise NotImplementedError(str(op))
class ColumnOperators(Operators):
    """Defines comparison and math operations for column-like objects.

    Every method delegates to operate()/reverse_operate() with the
    corresponding function from the ``operators`` module.
    """

    # NOTE(review): presumably defeats comparison hooks that look for a
    # timetuple attribute (datetime interop) — confirm
    timetuple = None

    def __lt__(self, other):
        return self.operate(operators.lt, other)

    def __le__(self, other):
        return self.operate(operators.le, other)

    # keep the inherited hash even though __eq__ is overridden below
    __hash__ = Operators.__hash__

    def __eq__(self, other):
        return self.operate(operators.eq, other)

    def __ne__(self, other):
        return self.operate(operators.ne, other)

    def __gt__(self, other):
        return self.operate(operators.gt, other)

    def __ge__(self, other):
        return self.operate(operators.ge, other)

    def __neg__(self):
        return self.operate(operators.neg)

    def concat(self, other):
        """string concatenation operator"""
        return self.operate(operators.concat_op, other)

    def like(self, other, escape=None):
        """LIKE operator, with optional escape character"""
        return self.operate(operators.like_op, other, escape=escape)

    def ilike(self, other, escape=None):
        """case-insensitive LIKE operator"""
        return self.operate(operators.ilike_op, other, escape=escape)

    def in_(self, other):
        """IN operator"""
        return self.operate(operators.in_op, other)

    def startswith(self, other, **kwargs):
        return self.operate(operators.startswith_op, other, **kwargs)

    def endswith(self, other, **kwargs):
        return self.operate(operators.endswith_op, other, **kwargs)

    def contains(self, other, **kwargs):
        return self.operate(operators.contains_op, other, **kwargs)

    def match(self, other, **kwargs):
        """full-text MATCH operator"""
        return self.operate(operators.match_op, other, **kwargs)

    def desc(self):
        """descending ordering modifier"""
        return self.operate(operators.desc_op)

    def asc(self):
        """ascending ordering modifier"""
        return self.operate(operators.asc_op)

    def collate(self, collation):
        """COLLATE modifier"""
        return self.operate(operators.collate, collation)

    # reversed-operand arithmetic (Python calls these when the left
    # operand does not handle the operation)
    def __radd__(self, other):
        return self.reverse_operate(operators.add, other)

    def __rsub__(self, other):
        return self.reverse_operate(operators.sub, other)

    def __rmul__(self, other):
        return self.reverse_operate(operators.mul, other)

    def __rdiv__(self, other):
        return self.reverse_operate(operators.div, other)

    def between(self, cleft, cright):
        """BETWEEN operator"""
        return self.operate(operators.between_op, cleft, cright)

    def distinct(self):
        """DISTINCT modifier"""
        return self.operate(operators.distinct_op)

    def __add__(self, other):
        return self.operate(operators.add, other)

    def __sub__(self, other):
        return self.operate(operators.sub, other)

    def __mul__(self, other):
        return self.operate(operators.mul, other)

    def __div__(self, other):
        return self.operate(operators.div, other)

    def __mod__(self, other):
        return self.operate(operators.mod, other)

    def __truediv__(self, other):
        return self.operate(operators.truediv, other)

    def __rtruediv__(self, other):
        return self.reverse_operate(operators.truediv, other)
class _CompareMixin(ColumnOperators):
    """Defines comparison and math operations for ClauseElement instances,
    building _BinaryExpression / _UnaryExpression constructs."""

    def __compare(self, op, obj, negate=None, reverse=False, **kwargs):
        # build a boolean-typed binary expression; comparisons against
        # None/null() are rewritten to IS / IS NOT
        if obj is None or isinstance(obj, _Null):
            if op == operators.eq:
                return _BinaryExpression(self, null(), operators.is_, negate=operators.isnot)
            elif op == operators.ne:
                return _BinaryExpression(self, null(), operators.isnot, negate=operators.is_)
            else:
                raise exc.ArgumentError("Only '='/'!=' operators can be used with NULL")
        else:
            obj = self._check_literal(obj)

        if reverse:
            return _BinaryExpression(obj,
                            self,
                            op,
                            type_=sqltypes.BOOLEANTYPE,
                            negate=negate, modifiers=kwargs)
        else:
            return _BinaryExpression(self,
                            obj,
                            op,
                            type_=sqltypes.BOOLEANTYPE,
                            negate=negate, modifiers=kwargs)

    def __operate(self, op, obj, reverse=False):
        # build an arithmetic/string binary expression; the operator and
        # result type are negotiated between the two operand types
        obj = self._check_literal(obj)

        if reverse:
            left, right = obj, self
        else:
            left, right = self, obj

        if left.type is None:
            op, result_type = sqltypes.NULLTYPE._adapt_expression(op, right.type)
        elif right.type is None:
            op, result_type = left.type._adapt_expression(op, sqltypes.NULLTYPE)
        else:
            op, result_type = left.type._adapt_expression(op, right.type)

        return _BinaryExpression(left, right, op, type_=result_type)

    # dispatch table: operator function -> (handler, extra args...).
    # comparison entries carry their negation operator.  this class-level
    # name shadows the module-level ``operators`` within the class body
    # only; methods below reach it via _CompareMixin.operators.  the
    # handlers are the (name-mangled) functions above, called unbound.
    operators = {
        operators.add : (__operate,),
        operators.mul : (__operate,),
        operators.sub : (__operate,),
        operators.div : (__operate,),
        operators.mod : (__operate,),
        operators.truediv : (__operate,),
        operators.lt : (__compare, operators.ge),
        operators.le : (__compare, operators.gt),
        operators.ne : (__compare, operators.eq),
        operators.gt : (__compare, operators.le),
        operators.ge : (__compare, operators.lt),
        operators.eq : (__compare, operators.ne),
        operators.like_op : (__compare, operators.notlike_op),
        operators.ilike_op : (__compare, operators.notilike_op),
    }

    def operate(self, op, *other, **kwargs):
        # look up the handler and call it unbound with self
        o = _CompareMixin.operators[op]
        return o[0](self, op, other[0], *o[1:], **kwargs)

    def reverse_operate(self, op, other, **kwargs):
        o = _CompareMixin.operators[op]
        return o[0](self, op, other, reverse=True, *o[1:], **kwargs)

    def in_(self, other):
        """Compare this element to a sequence of values or a selectable
        using IN."""
        return self._in_impl(operators.in_op, operators.notin_op, other)

    def _in_impl(self, op, negate_op, seq_or_selectable):
        seq_or_selectable = _clause_element_as_expr(seq_or_selectable)

        # selectable forms: compare against a scalar-ized select
        if isinstance(seq_or_selectable, _ScalarSelect):
            return self.__compare( op, seq_or_selectable, negate=negate_op)

        elif isinstance(seq_or_selectable, _SelectBaseMixin):
            return self.__compare( op, seq_or_selectable.as_scalar(), negate=negate_op)

        elif isinstance(seq_or_selectable, Selectable):
            return self.__compare( op, seq_or_selectable, negate=negate_op)

        # otherwise a plain sequence: coerce each element to a bind param
        args = []
        for o in seq_or_selectable:
            if not _is_literal(o):
                if not isinstance( o, _CompareMixin):
                    raise exc.InvalidRequestError(
                        "in() function accepts either a list of non-selectable values, "
                        "or a selectable: %r" % o)
            else:
                o = self._bind_param(o)
            args.append(o)

        if len(args) == 0:
            # against zero row selectable. We use != to build the
            # contradiction as it handles NULL values appropriately, i.e.
            # "not (x IN ())" should not return NULL values for x.
            util.warn("The IN-predicate on \"%s\" was invoked with an empty sequence. "
                      "This results in a contradiction, which nonetheless can be "
                      "expensive to evaluate. Consider alternative strategies for "
                      "improved performance." % self)
            return self != self

        return self.__compare(op, ClauseList(*args).self_group(against=op), negate=negate_op)

    def __neg__(self):
        return _UnaryExpression(self, operator=operators.neg)

    def startswith(self, other, escape=None):
        """Produce the clause ``LIKE '<other>%'``."""
        # use __radd__ to force string concat behavior
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String).__radd__(self._check_literal(other)),
            escape=escape)

    def endswith(self, other, escape=None):
        """Produce the clause ``LIKE '%<other>'``."""
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String) + self._check_literal(other),
            escape=escape)

    def contains(self, other, escape=None):
        """Produce the clause ``LIKE '%<other>%'``."""
        return self.__compare(
            operators.like_op,
            literal_column("'%'", type_=sqltypes.String) +
                self._check_literal(other) +
                literal_column("'%'", type_=sqltypes.String),
            escape=escape)

    def match(self, other):
        """Produce a MATCH clause (full-text search)."""
        return self.__compare(operators.match_op, self._check_literal(other))

    def label(self, name):
        """Produce a column label, i.e. ``<expr> AS <name>``."""
        return _Label(name, self, self.type)

    def desc(self):
        """Produce a DESC ordering clause."""
        return desc(self)

    def asc(self):
        """Produce an ASC ordering clause."""
        return asc(self)

    def distinct(self):
        """Produce a DISTINCT clause, preserving this expression's type."""
        return _UnaryExpression(self, operator=operators.distinct_op, type_=self.type)

    def between(self, cleft, cright):
        """Produce a BETWEEN clause."""
        return _BinaryExpression(
                self,
                ClauseList(
                    self._check_literal(cleft),
                    self._check_literal(cright),
                    operator=operators.and_,
                    group=False),
                operators.between_op)

    def collate(self, collation):
        """Produce a COLLATE clause."""
        return collate(self, collation)

    def op(self, operator):
        """produce a generic operator function using the given string."""
        # note: the 'operator' parameter shadows the module-level name here
        return lambda other: self.__operate(operator, other)

    def _bind_param(self, obj):
        return _BindParamClause(None, obj, _fallback_type=self.type, unique=True)

    def _check_literal(self, other):
        # coerce the RHS: adopt our type onto untyped binds, unwrap
        # __clause_element__, wrap raw literals as binds, and convert
        # selects/aliases to scalar form
        if isinstance(other, _BindParamClause) and isinstance(other.type, sqltypes.NullType):
            other.type = self.type
            return other
        elif hasattr(other, '__clause_element__'):
            return other.__clause_element__()
        elif not isinstance(other, ClauseElement):
            return self._bind_param(other)
        elif isinstance(other, (_SelectBaseMixin, Alias)):
            return other.as_scalar()
        else:
            return other
class ColumnElement(ClauseElement, _CompareMixin):
    """Represent an element usable within the columns clause of a
    SELECT statement."""

    __visit_name__ = 'column'
    primary_key = False
    foreign_keys = []
    quote = None
    _label = None

    @property
    def _select_iterable(self):
        return (self, )

    @util.memoized_property
    def base_columns(self):
        # the innermost, non-proxying columns this element derives from
        return util.column_set(c for c in self.proxy_set
                                     if not hasattr(c, 'proxies'))

    @util.memoized_property
    def proxy_set(self):
        # this column plus everything it proxies for, recursively
        s = util.column_set([self])
        if hasattr(self, 'proxies'):
            for c in self.proxies:
                s.update(c.proxy_set)
        return s

    def shares_lineage(self, othercolumn):
        """Return True if the given ColumnElement has a common ancestor to
        this ColumnElement."""
        return bool(self.proxy_set.intersection(othercolumn.proxy_set))

    def _make_proxy(self, selectable, name=None):
        """Create a new ColumnElement representing this one as it appears
        in the given selectable, registered in selectable.columns."""
        if name:
            co = ColumnClause(name, selectable, type_=getattr(self, 'type', None))
        else:
            # anonymous: key by str(self), label anonymously
            name = str(self)
            co = ColumnClause(self.anon_label, selectable, type_=getattr(self, 'type', None))

        co.proxies = [self]
        selectable.columns[name] = co
        return co

    def compare(self, other, use_proxies=False, equivalents=None, **kw):
        """Compare this ColumnElement to another.

        With use_proxies, shared lineage counts as equal; 'equivalents'
        adds extra comparison candidates for 'other'.
        """
        to_compare = (other, )
        if equivalents and other in equivalents:
            to_compare = equivalents[other].union(to_compare)

        for oth in to_compare:
            if use_proxies and self.shares_lineage(oth):
                return True
            elif oth is self:
                return True
        else:
            # for-else: no candidate matched
            return False

    @util.memoized_property
    def anon_label(self):
        # a deferred-rendering label keyed to this object's id
        return _generated_label("%%(%d %s)s" % (id(self), getattr(self, 'name', 'anon')))
class ColumnCollection(util.OrderedProperties):
    """An ordered dictionary of ColumnElement instances, keyed by each
    column's 'key'; __eq__ produces SQL equality clauses."""

    def __init__(self, *cols):
        super(ColumnCollection, self).__init__()
        self.update((c.key, c) for c in cols)

    def __str__(self):
        return repr([str(c) for c in self])

    def replace(self, column):
        """add the given column, removing an existing unaliased column of
        the same name when the new column is key-aliased."""
        if column.name in self and column.key != column.name:
            other = self[column.name]
            if other.name == other.key:
                del self[other.name]
        util.OrderedProperties.__setitem__(self, column.key, column)

    def add(self, column):
        """Add a column to this collection, keyed by the column's 'key'."""
        self[column.key] = column

    def __setitem__(self, key, value):
        if key in self:
            # this warning is primarily to catch select() statements which have conflicting
            # column names in their exported columns collection
            existing = self[key]
            if not existing.shares_lineage(value):
                util.warn(("Column %r on table %r being replaced by another "
                           "column with the same key. Consider use_labels "
                           "for select() statements.") % (key, getattr(existing, 'table', None)))
        util.OrderedProperties.__setitem__(self, key, value)

    def remove(self, column):
        del self[column.key]

    def extend(self, iter):
        for c in iter:
            self.add(c)

    # defining __eq__ makes the collection unhashable
    __hash__ = None

    def __eq__(self, other):
        # AND together equality clauses for every lineage-sharing pair
        l = []
        for c in other:
            for local in self:
                if c.shares_lineage(local):
                    l.append(c==local)
        return and_(*l)

    def __contains__(self, other):
        if not isinstance(other, basestring):
            raise exc.ArgumentError("__contains__ requires a string argument")
        return util.OrderedProperties.__contains__(self, other)

    def contains_column(self, col):
        # have to use a Set here, because it will compare the identity
        # of the column, not just using "==" for comparison which will always return a
        # "True" value (i.e. a BinaryClause...)
        return col in util.column_set(self)
class ColumnSet(util.ordered_column_set):
    """An ordered set of ColumnElement instances whose ``==`` produces a
    SQL conjunction of column equality clauses."""

    def contains_column(self, col):
        return col in self

    def extend(self, cols):
        for col in cols:
            self.add(col)

    def __add__(self, other):
        return list(self) + list(other)

    def __eq__(self, other):
        # AND together equality clauses for every lineage-sharing pair
        comparisons = [c == local
                       for c in other
                       for local in self
                       if c.shares_lineage(local)]
        return and_(*comparisons)

    def __hash__(self):
        return hash(tuple(x for x in self))
class Selectable(ClauseElement):
    """mark a class as being selectable"""
    __visit_name__ = 'selectable'
class FromClause(Selectable):
    """Represent an element that can be used within the FROM clause of a
    SELECT statement."""

    __visit_name__ = 'fromclause'
    named_with_column = False
    _hide_froms = []
    quote = None
    schema = None

    def count(self, whereclause=None, **params):
        """return a SELECT COUNT generated against this FromClause."""
        if self.primary_key:
            col = list(self.primary_key)[0]
        else:
            col = list(self.columns)[0]
        return select(
                    [func.count(col).label('tbl_row_count')],
                    whereclause,
                    from_obj=[self],
                    **params)

    def select(self, whereclause=None, **params):
        """return a SELECT of this FromClause."""
        return select([self], whereclause, **params)

    def join(self, right, onclause=None, isouter=False):
        """return a join of this FromClause against another FromClause."""
        return Join(self, right, onclause, isouter)

    def outerjoin(self, right, onclause=None):
        """return an outer join of this FromClause against another."""
        return Join(self, right, onclause, True)

    def alias(self, name=None):
        """return an alias of this FromClause."""
        return Alias(self, name)

    def is_derived_from(self, fromclause):
        """Return True if this FromClause is 'derived' from the given
        FromClause (i.e. it appears in this clause's clone lineage)."""
        return fromclause in self._cloned_set

    def replace_selectable(self, old, alias):
        """replace all occurrences of FromClause 'old' with the given Alias
        object, returning a copy of this FromClause."""
        # deferred import to avoid a circular dependency at module load
        global ClauseAdapter
        if ClauseAdapter is None:
            from sqlalchemy.sql.util import ClauseAdapter
        return ClauseAdapter(alias).traverse(self)

    def correspond_on_equivalents(self, column, equivalents):
        """Return corresponding_column for the given column, or if None,
        search for a match among the given equivalents."""
        col = self.corresponding_column(column, require_embedded=True)
        # NOTE(review): 'col' is None on this branch, so the condition
        # tests whether None is a key of 'equivalents' — possibly 'column'
        # was intended; left unchanged pending verification against callers.
        if col is None and col in equivalents:
            for equiv in equivalents[col]:
                nc = self.corresponding_column(equiv, require_embedded=True)
                if nc:
                    return nc
        return col

    def corresponding_column(self, column, require_embedded=False):
        """Given a ColumnElement, return the exported ColumnElement of this
        FromClause which corresponds to it via a proxy relationship, or
        None when no correspondence is found."""
        # dont dig around if the column is locally present
        if self.c.contains_column(column):
            return column

        col, intersect = None, None
        target_set = column.proxy_set
        cols = self.c
        for c in cols:
            i = target_set.intersection(itertools.chain(*[p._cloned_set for p in c.proxy_set]))
            if i and \
                (not require_embedded or c.proxy_set.issuperset(target_set)):
                if col is None:
                    # no corresponding column yet, pick this one.
                    col, intersect = c, i
                elif len(i) > len(intersect):
                    # 'c' has a larger field of correspondence than 'col'.
                    # i.e. selectable.c.a1_x->a1.c.x->table.c.x matches
                    # a1.c.x->table.c.x better than
                    # selectable.c.x->table.c.x does.
                    col, intersect = c, i
                elif i == intersect:
                    # they have the same field of correspondence.
                    # see which proxy_set has fewer columns in it, which indicates
                    # a closer relationship with the root column. Also take into
                    # account the "weight" attribute which CompoundSelect() uses to
                    # give higher precedence to columns based on vertical position
                    # in the compound statement, and discard columns that have no
                    # reference to the target column (also occurs with
                    # CompoundSelect)
                    col_distance = util.reduce(operator.add,
                                        [sc._annotations.get('weight', 1)
                                         for sc in col.proxy_set
                                         if sc.shares_lineage(column)]
                                    )
                    c_distance = util.reduce(operator.add,
                                        [sc._annotations.get('weight', 1)
                                         for sc in c.proxy_set
                                         if sc.shares_lineage(column)]
                                    )
                    if c_distance < col_distance:
                        col, intersect = c, i
        return col

    @property
    def description(self):
        """a brief description of this FromClause, used in displays/errors."""
        return getattr(self, 'name', self.__class__.__name__ + " object")

    def _reset_exported(self):
        """delete memoized collections when this FromClause is cloned."""
        # BUG FIX: the original tuple was written
        #   ('_columns', '_primary_key' '_foreign_keys', 'locate_all_froms')
        # — the missing comma made Python concatenate the two adjacent
        # string literals into the nonexistent attribute name
        # '_primary_key_foreign_keys', so the '_primary_key' and
        # '_foreign_keys' memoizations were never actually invalidated.
        for attr in ('_columns', '_primary_key', '_foreign_keys',
                     'locate_all_froms'):
            self.__dict__.pop(attr, None)

    @util.memoized_property
    def _columns(self):
        """the collection of ColumnElement objects this FromClause exports."""
        self._export_columns()
        return self._columns

    @util.memoized_property
    def _primary_key(self):
        """the collection of primary-key columns of this FromClause."""
        self._export_columns()
        return self._primary_key

    @util.memoized_property
    def _foreign_keys(self):
        """the collection of foreign-key markers of this FromClause."""
        self._export_columns()
        return self._foreign_keys

    columns = property(attrgetter('_columns'), doc=_columns.__doc__)
    primary_key = property(
                    attrgetter('_primary_key'),
                    doc=_primary_key.__doc__)
    foreign_keys = property(
                    attrgetter('_foreign_keys'),
                    doc=_foreign_keys.__doc__)

    # synonyms for 'columns'
    c = _select_iterable = property(attrgetter('columns'), doc=_columns.__doc__)

    def _export_columns(self):
        """Initialize the column collections; subclasses populate them via
        _populate_column_collection()."""
        self._columns = ColumnCollection()
        self._primary_key = ColumnSet()
        self._foreign_keys = set()
        self._populate_column_collection()

    def _populate_column_collection(self):
        # overridden by subclasses (e.g. Join, Alias) to fill the
        # collections created in _export_columns()
        pass
class _BindParamClause(ColumnElement):
    """Represent a bind parameter."""

    __visit_name__ = 'bindparam'
    quote = None

    def __init__(self, key, value, type_=None, unique=False,
                            isoutparam=False, required=False,
                            _fallback_type=None):
        """Construct a _BindParamClause.

        The key is anonymized (made unique against id(self)) when None or
        when 'unique' is set.  The type is taken explicitly from type_,
        else inferred from the Python type of 'value' with _fallback_type
        as backup.
        """
        if unique:
            self.key = _generated_label("%%(%d %s)s" % (id(self), key or 'param'))
        else:
            self.key = key or _generated_label("%%(%d param)s" % id(self))
        # the original, pre-anonymization key, kept for re-uniquifying
        # on clone / _convert_to_unique
        self._orig_key = key or 'param'
        self.unique = unique
        self.value = value
        self.isoutparam = isoutparam
        self.required = required
        if type_ is None:
            # infer from the value's Python type; prefer the fallback type
            # when it shares the mapped type's affinity
            self.type = sqltypes.type_map.get(type(value), _fallback_type or sqltypes.NULLTYPE)
            if _fallback_type and _fallback_type._type_affinity == self.type._type_affinity:
                self.type = _fallback_type
        elif isinstance(type_, type):
            self.type = type_()
        else:
            self.type = type_

    def _clone(self):
        c = ClauseElement._clone(self)
        if self.unique:
            # regenerate the anonymized key against the new object's id
            c.key = _generated_label("%%(%d %s)s" % (id(c), c._orig_key or 'param'))
        return c

    def _convert_to_unique(self):
        if not self.unique:
            self.unique = True
            self.key = _generated_label("%%(%d %s)s" % (id(self), self._orig_key or 'param'))

    def bind_processor(self, dialect):
        """return the dialect-specific bind value processor for this type."""
        return self.type.dialect_impl(dialect).bind_processor(dialect)

    def compare(self, other, **kw):
        """Compare this _BindParamClause to the given clause; keys are
        ignored, only type affinity and value matter."""
        return isinstance(other, _BindParamClause) and \
                self.type._compare_type_affinity(other.type) and \
                self.value == other.value

    def __getstate__(self):
        """execute a deferred (callable) value for pickling purposes."""
        d = self.__dict__.copy()
        v = self.value
        if util.callable(v):
            v = v()
        d['value'] = v
        return d

    def __repr__(self):
        return "_BindParamClause(%r, %r, type_=%r)" % (
            self.key, self.value, self.type
            )
class _TypeClause(ClauseElement):
    """Handle a type keyword in a SQL statement (e.g. within CAST)."""

    __visit_name__ = 'typeclause'

    def __init__(self, type):
        # note: the parameter intentionally shadows the builtin 'type'
        self.type = type
class _Generative(object):
    """Provide a ``_generate()`` helper producing a shallow copy of the
    object, supporting the generative (method-chaining) pattern."""

    def _generate(self):
        duplicate = self.__class__.__new__(self.__class__)
        duplicate.__dict__ = self.__dict__.copy()
        return duplicate
class Executable(_Generative):
    """Mark a ClauseElement as supporting execution."""

    supports_execution = True
    # immutable default; instances replace it generatively below
    _execution_options = util.frozendict()

    @_generative
    def execution_options(self, **kw):
        """Set non-SQL options which take effect during execution;
        generative — returns a copy with the merged options."""
        self._execution_options = self._execution_options.union(kw)
# legacy, some outside users may be calling this
# (deprecated alias; do not use in new code)
_Executable = Executable
class _TextClause(Executable, ClauseElement):
    """Represent a literal SQL text fragment; ':name' tokens in the text
    are recognized as bind parameters."""

    __visit_name__ = 'textclause'

    # matches ':name' not preceded by ':', '\' or a word char,
    # and not followed by ':'
    _bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
    _execution_options = Executable._execution_options.union({'autocommit':PARSE_AUTOCOMMIT})

    @property
    def _select_iterable(self):
        return (self,)

    _hide_froms = []

    def __init__(self, text = "", bind=None,
                    bindparams=None, typemap=None,
                    autocommit=None):
        self._bind = bind
        self.bindparams = {}
        self.typemap = typemap
        if autocommit is not None:
            util.warn_deprecated("autocommit on text() is deprecated. "
                                 "Use .execution_options(autocommit=True)")
            self._execution_options = self._execution_options.union({'autocommit':autocommit})
        if typemap is not None:
            # coerce type classes to instances, in place
            for key in typemap.keys():
                typemap[key] = sqltypes.to_instance(typemap[key])

        def repl(m):
            # register a bindparam() for each ':name' token found
            self.bindparams[m.group(1)] = bindparam(m.group(1))
            return ":%s" % m.group(1)

        # scan the string and search for bind parameter names, add them
        # to the list of bindparams
        self.text = self._bind_params_regex.sub(repl, text)
        if bindparams is not None:
            for b in bindparams:
                self.bindparams[b.key] = b

    @property
    def type(self):
        if self.typemap is not None and len(self.typemap) == 1:
            # NOTE(review): list(dict)[0] yields the single *key* of the
            # typemap, not the type instance — looks suspicious; confirm
            return list(self.typemap)[0]
        else:
            return None

    def _copy_internals(self, clone=_clone):
        self.bindparams = dict((b.key, clone(b))
                               for b in self.bindparams.values())

    def get_children(self, **kwargs):
        return self.bindparams.values()
class _Null(ColumnElement):
    """Represent the NULL keyword in a SQL statement."""

    __visit_name__ = 'null'

    def __init__(self):
        self.type = sqltypes.NULLTYPE
class ClauseList(ClauseElement):
    """Describe a list of clauses, separated by an operator
    (comma by default)."""

    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        self.operator = kwargs.pop('operator', operators.comma_op)
        self.group = kwargs.pop('group', True)
        self.group_contents = kwargs.pop('group_contents', True)
        if self.group_contents:
            # parenthesize children against our operator as needed
            self.clauses = [
                _literal_as_text(clause).self_group(against=self.operator)
                for clause in clauses if clause is not None]
        else:
            self.clauses = [
                _literal_as_text(clause)
                for clause in clauses if clause is not None]

    @util.memoized_property
    def type(self):
        # the type of a clause list is the type of its first element
        if self.clauses:
            return self.clauses[0].type
        else:
            return sqltypes.NULLTYPE

    def __iter__(self):
        return iter(self.clauses)

    def __len__(self):
        return len(self.clauses)

    @property
    def _select_iterable(self):
        return iter(self)

    def append(self, clause):
        # TODO: not sure if i like the 'group_contents' flag. need to
        # define the difference between a ClauseList of ClauseLists,
        # and a "flattened" ClauseList of ClauseLists. flatten()
        # method ?
        if self.group_contents:
            self.clauses.append(_literal_as_text(clause).self_group(against=self.operator))
        else:
            self.clauses.append(_literal_as_text(clause))

    def _copy_internals(self, clone=_clone):
        self.clauses = [clone(clause) for clause in self.clauses]

    def get_children(self, **kwargs):
        return self.clauses

    @property
    def _from_objects(self):
        return list(itertools.chain(*[c._from_objects for c in self.clauses]))

    def self_group(self, against=None):
        # parenthesize when our operator binds less tightly than 'against'
        if self.group and self.operator is not against and \
                operators.is_precedent(self.operator, against):
            return _Grouping(self)
        else:
            return self

    def compare(self, other, **kw):
        """Compare this ClauseList to the given ClauseElement.

        A single-element ClauseList compares equal to its sole element.
        """
        if not isinstance(other, ClauseList) and len(self.clauses) == 1:
            return self.clauses[0].compare(other, **kw)
        elif isinstance(other, ClauseList) and len(self.clauses) == len(other.clauses):
            for i in range(0, len(self.clauses)):
                if not self.clauses[i].compare(other.clauses[i], **kw):
                    return False
            else:
                # for-else: every pair compared equal
                return self.operator == other.operator
        else:
            return False
class BooleanClauseList(ClauseList, ColumnElement):
    """a ClauseList usable as a column expression (e.g. AND/OR lists)."""

    __visit_name__ = 'clauselist'

    def __init__(self, *clauses, **kwargs):
        super(BooleanClauseList, self).__init__(*clauses, **kwargs)
        self.type = sqltypes.to_instance(kwargs.get('type_', sqltypes.Boolean))

    @property
    def _select_iterable(self):
        return (self, )
class _Tuple(ClauseList, ColumnElement):
    """a parenthesized list of expressions, usable as a column element
    (e.g. composite comparisons)."""

    def __init__(self, *clauses, **kw):
        clauses = [_literal_as_binds(c) for c in clauses]
        super(_Tuple, self).__init__(*clauses, **kw)
        self.type = _type_from_args(clauses)

    @property
    def _select_iterable(self):
        return (self, )

    def _bind_param(self, obj):
        # expand a composite value into one bind per element
        return _Tuple(*[
            _BindParamClause(None, o, _fallback_type=self.type, unique=True)
            for o in obj
        ]).self_group()
class _Case(ColumnElement):
    """Represent a SQL CASE expression."""

    __visit_name__ = 'case'

    def __init__(self, whens, value=None, else_=None):
        try:
            # accept a dict-like 'whens' as well as a sequence of pairs
            whens = util.dictlike_iteritems(whens)
        except TypeError:
            pass

        if value is not None:
            # "simple" CASE: conditions are literal-comparable values
            whenlist = [
                (_literal_as_binds(c).self_group(), _literal_as_binds(r)) for (c, r) in whens
            ]
        else:
            # "searched" CASE: conditions must be full boolean expressions
            whenlist = [
                (_no_literals(c).self_group(), _literal_as_binds(r)) for (c, r) in whens
            ]

        if whenlist:
            # take the type of the last result expression
            type_ = list(whenlist[-1])[-1].type
        else:
            type_ = None

        if value is None:
            self.value = None
        else:
            self.value = _literal_as_binds(value)

        self.type = type_
        self.whens = whenlist
        if else_ is not None:
            self.else_ = _literal_as_binds(else_)
        else:
            self.else_ = None

    def _copy_internals(self, clone=_clone):
        if self.value is not None:
            self.value = clone(self.value)
        self.whens = [(clone(x), clone(y)) for x, y in self.whens]
        if self.else_ is not None:
            self.else_ = clone(self.else_)

    def get_children(self, **kwargs):
        if self.value is not None:
            yield self.value
        for x, y in self.whens:
            yield x
            yield y
        if self.else_ is not None:
            yield self.else_

    @property
    def _from_objects(self):
        return list(itertools.chain(*[x._from_objects for x in self.get_children()]))
class FunctionElement(Executable, ColumnElement, FromClause):
    """Base for SQL function-oriented constructs."""

    def __init__(self, *clauses, **kwargs):
        # NOTE(review): self.name is read here but set only by subclasses
        # (e.g. Function.__init__ assigns it before calling up) — confirm
        # all subclasses establish 'name' first
        args = [_literal_as_binds(c, self.name) for c in clauses]
        self.clause_expr = ClauseList(
                                operator=operators.comma_op,
                                 group_contents=True, *args).\
                                 self_group()

    @property
    def columns(self):
        # a function acts as a FROM-like object exporting only itself
        return [self]

    @util.memoized_property
    def clauses(self):
        return self.clause_expr.element

    @property
    def _from_objects(self):
        return self.clauses._from_objects

    def get_children(self, **kwargs):
        return self.clause_expr,

    def _copy_internals(self, clone=_clone):
        self.clause_expr = clone(self.clause_expr)
        self._reset_exported()
        util.reset_memoized(self, 'clauses')

    def select(self):
        """Produce a select() of this function, carrying over any
        execution options."""
        s = select([self])
        if self._execution_options:
            s = s.execution_options(**self._execution_options)
        return s

    def scalar(self):
        """Execute this function and return its scalar result."""
        return self.select().execute().scalar()

    def execute(self):
        """Execute this function within an embedded select()."""
        return self.select().execute()

    def _bind_param(self, obj):
        return _BindParamClause(None, obj, _fallback_type=self.type, unique=True)
class Function(FunctionElement):
    """Describe a named SQL function."""

    __visit_name__ = 'function'

    def __init__(self, name, *clauses, **kw):
        self.packagenames = kw.pop('packagenames', None) or []
        self.name = name
        self._bind = kw.get('bind', None)
        self.type = sqltypes.to_instance(kw.get('type_', None))
        # name/type must be set before the base constructor reads them
        FunctionElement.__init__(self, *clauses, **kw)

    def _bind_param(self, obj):
        # binds inherit the function's name as their key
        return _BindParamClause(self.name, obj, _fallback_type=self.type, unique=True)
class _Cast(ColumnElement):
    """Represent the SQL ``CAST(expr AS type)`` construct."""

    __visit_name__ = 'cast'

    def __init__(self, clause, totype, **kwargs):
        self.type = sqltypes.to_instance(totype)
        self.clause = _literal_as_binds(clause, None)
        self.typeclause = _TypeClause(self.type)

    def _copy_internals(self, clone=_clone):
        self.clause = clone(self.clause)
        self.typeclause = clone(self.typeclause)

    def get_children(self, **kwargs):
        return self.clause, self.typeclause

    @property
    def _from_objects(self):
        return self.clause._from_objects
class _Extract(ColumnElement):
    """Represent the SQL ``EXTRACT(field FROM expr)`` construct."""

    __visit_name__ = 'extract'

    def __init__(self, field, expr, **kwargs):
        # result type is hardcoded to Integer
        self.type = sqltypes.Integer()
        self.field = field
        self.expr = _literal_as_binds(expr, None)

    def _copy_internals(self, clone=_clone):
        self.expr = clone(self.expr)

    def get_children(self, **kwargs):
        return self.expr,

    @property
    def _from_objects(self):
        return self.expr._from_objects
class _UnaryExpression(ColumnElement):
    """Represent an expression with a unary operator or modifier."""

    __visit_name__ = 'unary'

    def __init__(self, element, operator=None, modifier=None, type_=None, negate=None):
        self.operator = operator
        self.modifier = modifier
        # group against whichever of operator/modifier is present
        self.element = _literal_as_text(element).self_group(against=self.operator or self.modifier)
        self.type = sqltypes.to_instance(type_)
        self.negate = negate

    @property
    def _from_objects(self):
        return self.element._from_objects

    def _copy_internals(self, clone=_clone):
        self.element = clone(self.element)

    def get_children(self, **kwargs):
        return self.element,

    def compare(self, other, **kw):
        """Compare this _UnaryExpression against the given ClauseElement."""
        return (
            isinstance(other, _UnaryExpression) and
            self.operator == other.operator and
            self.modifier == other.modifier and
            self.element.compare(other.element, **kw)
        )

    def _negate(self):
        """Return the negated form, swapping operator and negate when a
        negation operator is known."""
        if self.negate is not None:
            return _UnaryExpression(
                self.element,
                operator=self.negate,
                negate=self.operator,
                modifier=self.modifier,
                type_=self.type)
        else:
            return super(_UnaryExpression, self)._negate()

    def self_group(self, against=None):
        # parenthesize when our operator binds less tightly than 'against'
        if self.operator and operators.is_precedent(self.operator, against):
            return _Grouping(self)
        else:
            return self
class _BinaryExpression(ColumnElement):
    """Represent an expression that is ``LEFT <operator> RIGHT``."""

    __visit_name__ = 'binary'

    def __init__(self, left, right, operator, type_=None, negate=None, modifiers=None):
        # group each side against the operator so precedence is preserved
        self.left = _literal_as_text(left).self_group(against=operator)
        self.right = _literal_as_text(right).self_group(against=operator)
        self.operator = operator
        self.type = sqltypes.to_instance(type_)
        self.negate = negate
        if modifiers is None:
            self.modifiers = {}
        else:
            self.modifiers = modifiers

    def __nonzero__(self):
        # Py2 truth hook: applies the operator to the hashes of each side,
        # so e.g. (x == x) evaluates truthfully; anything that fails
        # (unhashable element, inapplicable operator) means truth is
        # not defined.
        try:
            return self.operator(hash(self.left), hash(self.right))
        except Exception:
            # narrowed from a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit and masked interpreter shutdown
            raise TypeError("Boolean value of this clause is not defined")

    @property
    def _from_objects(self):
        return self.left._from_objects + self.right._from_objects

    def _copy_internals(self, clone=_clone):
        self.left = clone(self.left)
        self.right = clone(self.right)

    def get_children(self, **kwargs):
        return self.left, self.right

    def compare(self, other, **kw):
        """Compare this _BinaryExpression against the given one, allowing
        the operands to be flipped for commutative operators."""
        return (
            isinstance(other, _BinaryExpression) and
            self.operator == other.operator and
            (
                self.left.compare(other.left, **kw) and
                self.right.compare(other.right, **kw) or
                (
                    operators.is_commutative(self.operator) and
                    self.left.compare(other.right, **kw) and
                    self.right.compare(other.left, **kw)
                )
            )
        )

    def self_group(self, against=None):
        # use small/large defaults for comparison so that unknown
        # operators are always parenthesized
        if self.operator is not against and operators.is_precedent(self.operator, against):
            return _Grouping(self)
        else:
            return self

    def _negate(self):
        """Return the negated form, using the 'negate' operator when one
        is known, else falling back to NOT-wrapping."""
        if self.negate is not None:
            return _BinaryExpression(
                self.left,
                self.right,
                self.negate,
                negate=self.operator,
                type_=sqltypes.BOOLEANTYPE,
                modifiers=self.modifiers)
        else:
            return super(_BinaryExpression, self)._negate()
class _Exists(_UnaryExpression):
    """Represent an EXISTS clause."""

    __visit_name__ = _UnaryExpression.__visit_name__
    _from_objects = []

    def __init__(self, *args, **kwargs):
        if args and isinstance(args[0], (_SelectBaseMixin, _ScalarSelect)):
            # an existing select construct was passed directly
            s = args[0]
        else:
            if not args:
                # default to EXISTS (SELECT *)
                args = ([literal_column('*')],)
            s = select(*args, **kwargs).as_scalar().self_group()

        _UnaryExpression.__init__(self, s, operator=operators.exists, type_=sqltypes.Boolean)

    def select(self, whereclause=None, **params):
        return select([self], whereclause, **params)

    def correlate(self, fromclause):
        # generative: each of these methods returns a modified copy
        e = self._clone()
        e.element = self.element.correlate(fromclause).self_group()
        return e

    def select_from(self, clause):
        """return a new exists() construct with the given FROM expression."""
        e = self._clone()
        e.element = self.element.select_from(clause).self_group()
        return e

    def where(self, clause):
        """return a new exists() construct with the given expression added
        to its WHERE clause."""
        e = self._clone()
        e.element = self.element.where(clause).self_group()
        return e
class Join(FromClause):
    """represent a JOIN construct between two FromClause elements."""

    __visit_name__ = 'join'

    def __init__(self, left, right, onclause=None, isouter=False):
        self.left = _literal_as_text(left)
        self.right = _literal_as_text(right).self_group()
        if onclause is None:
            # derive the ON criterion from the relationship between sides
            self.onclause = self._match_primaries(self.left, self.right)
        else:
            self.onclause = onclause
        self.isouter = isouter
        self.__folded_equivalents = None

    @property
    def description(self):
        return "Join object on %s(%d) and %s(%d)" % (
            self.left.description,
            id(self.left),
            self.right.description,
            id(self.right))

    def is_derived_from(self, fromclause):
        return fromclause is self or \
                self.left.is_derived_from(fromclause) or\
                self.right.is_derived_from(fromclause)

    def self_group(self, against=None):
        # a join is always parenthesized as a FROM grouping
        return _FromGrouping(self)

    def _populate_column_collection(self):
        columns = [c for c in self.left.columns] + [c for c in self.right.columns]
        # deferred import to avoid a circular dependency at module load
        global sql_util
        if not sql_util:
            from sqlalchemy.sql import util as sql_util
        # primary key: reduced set of PK columns across both sides
        self._primary_key.extend(sql_util.reduce_columns(
                (c for c in columns if c.primary_key), self.onclause))
        self._columns.update((col._label, col) for col in columns)
        self._foreign_keys.update(itertools.chain(*[col.foreign_keys for col in columns]))

    def _copy_internals(self, clone=_clone):
        self._reset_exported()
        self.left = clone(self.left)
        self.right = clone(self.right)
        self.onclause = clone(self.onclause)
        self.__folded_equivalents = None

    def get_children(self, **kwargs):
        return self.left, self.right, self.onclause

    def _match_primaries(self, primary, secondary):
        global sql_util
        if not sql_util:
            from sqlalchemy.sql import util as sql_util
        return sql_util.join_condition(primary, secondary)

    def select(self, whereclause=None, fold_equivalents=False, **kwargs):
        """Create a Select from this Join."""
        if fold_equivalents:
            global sql_util
            if not sql_util:
                from sqlalchemy.sql import util as sql_util
            util.warn_deprecated("fold_equivalents is deprecated.")
            collist = sql_util.folded_equivalents(self)
        else:
            collist = [self.left, self.right]
        return select(collist, whereclause, from_obj=[self], **kwargs)

    @property
    def bind(self):
        # inherit a bind from either side
        return self.left.bind or self.right.bind

    def alias(self, name=None):
        """Create a Select out of this Join clause and return an Alias of it."""
        return self.select(use_labels=True, correlate=False).alias(name)

    @property
    def _hide_froms(self):
        return itertools.chain(*[_from_objects(x.left, x.right) for x in self._cloned_set])

    @property
    def _from_objects(self):
        return [self] + \
                self.onclause._from_objects + \
                self.left._from_objects + \
                self.right._from_objects
class Alias(FromClause):
    """Represent an alias (SQL ``AS`` construct) of a table or selectable."""
    __visit_name__ = 'alias'
    named_with_column = True
    def __init__(self, selectable, alias=None):
        # unwrap nested aliases so ``original`` refers to the innermost
        # non-Alias selectable
        baseselectable = selectable
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
        self.supports_execution = baseselectable.supports_execution
        if self.supports_execution:
            self._execution_options = baseselectable._execution_options
        self.element = selectable
        if alias is None:
            # derive an anonymous label from the aliased element's name
            if self.original.named_with_column:
                alias = getattr(self.original, 'name', None)
            alias = _generated_label('%%(%d %s)s' % (id(self), alias or 'anon'))
        self.name = alias
    @property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    def as_scalar(self):
        """Return a scalar form of the aliased element, if it supports one."""
        try:
            return self.element.as_scalar()
        except AttributeError:
            raise AttributeError("Element %s does not support 'as_scalar()'" % self.element)
    def is_derived_from(self, fromclause):
        if fromclause in self._cloned_set:
            return True
        return self.element.is_derived_from(fromclause)
    def _populate_column_collection(self):
        # export proxies of the aliased element's columns
        for col in self.element.columns:
            col._make_proxy(self)
    def _copy_internals(self, clone=_clone):
        self._reset_exported()
        # bug fix: use the ``clone`` callable supplied by the traversal
        # (previously the module-level default _clone was called
        # unconditionally, so custom cloning visitors never saw the
        # aliased element)
        self.element = clone(self.element)
        # re-derive ``original`` from the cloned element
        baseselectable = self.element
        while isinstance(baseselectable, Alias):
            baseselectable = baseselectable.element
        self.original = baseselectable
    def get_children(self, column_collections=True, aliased_selectables=True, **kwargs):
        if column_collections:
            for c in self.c:
                yield c
        if aliased_selectables:
            yield self.element
    @property
    def _from_objects(self):
        return [self]
    @property
    def bind(self):
        return self.element.bind
class _Grouping(ColumnElement):
    """Wrap a column-level element in parentheses for SQL emission."""
    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element
        self.type = getattr(element, 'type', None)

    @property
    def _label(self):
        # prefer the wrapped element's label; otherwise use an anon label
        label = getattr(self.element, '_label', None)
        if label:
            return label
        return self.anon_label

    def _copy_internals(self, clone=_clone):
        self.element = clone(self.element)

    def get_children(self, **kwargs):
        return (self.element,)

    @property
    def _from_objects(self):
        return self.element._from_objects

    def __getattr__(self, attr):
        # anything not defined here is delegated to the wrapped element
        return getattr(self.element, attr)

    def __getstate__(self):
        # pickle support: store only the wrapped element and its type
        return dict(element=self.element, type=self.type)

    def __setstate__(self, state):
        self.element = state['element']
        self.type = state['type']
class _FromGrouping(FromClause):
    """Parenthesize a FROM-level element (e.g. a join nested in a join)."""
    __visit_name__ = 'grouping'

    def __init__(self, element):
        self.element = element

    @property
    def columns(self):
        # expose the wrapped element's column collection directly
        return self.element.columns

    @property
    def _hide_froms(self):
        return self.element._hide_froms

    @property
    def _from_objects(self):
        return self.element._from_objects

    def get_children(self, **kwargs):
        return (self.element,)

    def _copy_internals(self, clone=_clone):
        self.element = clone(self.element)

    def __getattr__(self, attr):
        # anything not defined here is delegated to the wrapped element
        return getattr(self.element, attr)

    def __getstate__(self):
        # pickle support: only the wrapped element needs to be stored
        return dict(element=self.element)

    def __setstate__(self, state):
        self.element = state['element']
class _Label(ColumnElement):
    """Represent a column label (the ``AS`` construct) for a column element."""
    __visit_name__ = 'label'
    def __init__(self, name, element, type_=None):
        # collapse nested labels; only the innermost element is labeled
        while isinstance(element, _Label):
            element = element.element
        # fall back to an anonymous generated label when no name is given
        self.name = self.key = self._label = name or \
                        _generated_label("%%(%d %s)s" % (
                            id(self), getattr(element, 'name', 'anon'))
                        )
        self._element = element
        self._type = type_
        self.quote = element.quote
    @util.memoized_property
    def type(self):
        # explicit type_ wins; otherwise adopt the element's type
        return sqltypes.to_instance(
                    self._type or getattr(self._element, 'type', None)
                )
    @util.memoized_property
    def element(self):
        return self._element.self_group(against=operators.as_)
    def _proxy_attr(name):
        # class-body helper: build a read-only property delegating
        # attribute ``name`` to self.element
        get = attrgetter(name)
        def attr(self):
            return get(self.element)
        return property(attr)
    proxies = _proxy_attr('proxies')
    base_columns = _proxy_attr('base_columns')
    proxy_set = _proxy_attr('proxy_set')
    primary_key = _proxy_attr('primary_key')
    foreign_keys = _proxy_attr('foreign_keys')
    def get_children(self, **kwargs):
        return self.element,
    def _copy_internals(self, clone=_clone):
        self.element = clone(self.element)
    @property
    def _from_objects(self):
        return self.element._from_objects
    def _make_proxy(self, selectable, name = None):
        if isinstance(self.element, (Selectable, ColumnElement)):
            e = self.element._make_proxy(selectable, name=self.name)
        else:
            e = column(self.name)._make_proxy(selectable=selectable)
        # track this label as a proxied origin of the new column
        e.proxies.append(self)
        return e
class ColumnClause(_Immutable, ColumnElement):
    """Represent a textual column, optionally bound to a table clause.

    A lightweight construct with no defaults or constraint information,
    unlike a schema-level Column.
    """
    __visit_name__ = 'column'
    # schema-level event attributes are always absent on plain clauses
    onupdate = default = server_default = server_onupdate = None
    def __init__(self, text, selectable=None, type_=None, is_literal=False):
        self.key = self.name = text
        self.table = selectable
        self.type = sqltypes.to_instance(type_)
        # is_literal=True renders the text exactly as given
        self.is_literal = is_literal
    @util.memoized_property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    @util.memoized_property
    def _label(self):
        """Compute the "tablename_colname" label used with use_labels."""
        if self.is_literal:
            return None
        elif self.table is not None and self.table.named_with_column:
            if getattr(self.table, 'schema', None):
                label = self.table.schema.replace('.', '_') + "_" + \
                            _escape_for_generated(self.table.name) + "_" + \
                            _escape_for_generated(self.name)
            else:
                label = _escape_for_generated(self.table.name) + "_" + \
                            _escape_for_generated(self.name)
            return _generated_label(label)
        else:
            return self.name
    def label(self, name):
        # labeling with None is a no-op; the column acts as its own label
        if name is None:
            return self
        else:
            return super(ColumnClause, self).label(name)
    @property
    def _from_objects(self):
        if self.table is not None:
            return [self.table]
        else:
            return []
    def _bind_param(self, obj):
        return _BindParamClause(self.name, obj, _fallback_type=self.type, unique=True)
    def _make_proxy(self, selectable, name=None, attach=True):
        # propagate the "is_literal" flag only if we are keeping our name,
        # otherwise its considered to be a label
        is_literal = self.is_literal and (name is None or name == self.name)
        c = ColumnClause(
                    name or self.name,
                    selectable=selectable,
                    type_=self.type,
                    is_literal=is_literal
                )
        c.proxies = [self]
        if attach:
            selectable.columns[c.name] = c
        return c
class TableClause(_Immutable, FromClause):
    """Represent a minimal "table" construct, without schema-level metadata."""
    __visit_name__ = 'table'
    named_with_column = True
    def __init__(self, name, *columns):
        super(TableClause, self).__init__()
        self.name = self.fullname = name
        self._columns = ColumnCollection()
        self._primary_key = ColumnSet()
        self._foreign_keys = set()
        for c in columns:
            self.append_column(c)
    def _export_columns(self):
        # columns are appended directly; no export step applies here
        raise NotImplementedError()
    @util.memoized_property
    def description(self):
        # Py3K
        #return self.name
        # Py2K
        return self.name.encode('ascii', 'backslashreplace')
        # end Py2K
    def append_column(self, c):
        """Attach column clause ``c`` to this table."""
        self._columns[c.name] = c
        c.table = self
    def get_children(self, column_collections=True, **kwargs):
        if column_collections:
            return [c for c in self.c]
        else:
            return []
    def count(self, whereclause=None, **params):
        """Return a SELECT COUNT over this table, counting the first
        primary-key column (or the first column when no PK is defined)."""
        if self.primary_key:
            col = list(self.primary_key)[0]
        else:
            col = list(self.columns)[0]
        return select(
                    [func.count(col).label('tbl_row_count')],
                    whereclause,
                    from_obj=[self],
                    **params)
    def insert(self, values=None, inline=False, **kwargs):
        """Return an INSERT statement against this table."""
        return insert(self, values=values, inline=inline, **kwargs)
    def update(self, whereclause=None, values=None, inline=False, **kwargs):
        """Return an UPDATE statement against this table."""
        return update(self, whereclause=whereclause,
                        values=values, inline=inline, **kwargs)
    def delete(self, whereclause=None, **kwargs):
        """Return a DELETE statement against this table."""
        return delete(self, whereclause, **kwargs)
    @property
    def _from_objects(self):
        return [self]
class _SelectBaseMixin(Executable):
    """Base behavior shared by SELECT-like statements
    (Select, CompoundSelect)."""
    def __init__(self,
            use_labels=False,
            for_update=False,
            limit=None,
            offset=None,
            order_by=None,
            group_by=None,
            bind=None,
            autocommit=None):
        self.use_labels = use_labels
        self.for_update = for_update
        if autocommit is not None:
            util.warn_deprecated("autocommit on select() is deprecated. "
                    "Use .execution_options(autocommit=True)")
            self._execution_options = self._execution_options.union({'autocommit':autocommit})
        self._limit = limit
        self._offset = offset
        self._bind = bind
        self._order_by_clause = ClauseList(*util.to_list(order_by) or [])
        self._group_by_clause = ClauseList(*util.to_list(group_by) or [])
    def as_scalar(self):
        """Return a scalar (single column, single row) form of this select."""
        return _ScalarSelect(self)
    @_generative
    def apply_labels(self):
        """Generatively enable use_labels (columns labeled table_column)."""
        self.use_labels = True
    def label(self, name):
        """Return a scalar form of this select, labeled with ``name``."""
        return self.as_scalar().label(name)
    @_generative
    @util.deprecated(message="autocommit() is deprecated. "
                        "Use .execution_options(autocommit=True)")
    def autocommit(self):
        self._execution_options = self._execution_options.union({'autocommit':True})
    def _generate(self):
        # shallow-copy self for the @_generative pattern, resetting the
        # exported column collections on the copy
        s = self.__class__.__new__(self.__class__)
        s.__dict__ = self.__dict__.copy()
        s._reset_exported()
        return s
    @_generative
    def limit(self, limit):
        """Generatively apply a LIMIT."""
        self._limit = limit
    @_generative
    def offset(self, offset):
        """Generatively apply an OFFSET."""
        self._offset = offset
    @_generative
    def order_by(self, *clauses):
        """Generatively append ORDER BY criteria."""
        self.append_order_by(*clauses)
    @_generative
    def group_by(self, *clauses):
        """Generatively append GROUP BY criteria."""
        self.append_group_by(*clauses)
    def append_order_by(self, *clauses):
        """In-place append; a single None argument clears the ORDER BY."""
        if len(clauses) == 1 and clauses[0] is None:
            self._order_by_clause = ClauseList()
        else:
            if getattr(self, '_order_by_clause', None) is not None:
                clauses = list(self._order_by_clause) + list(clauses)
            self._order_by_clause = ClauseList(*clauses)
    def append_group_by(self, *clauses):
        """In-place append; a single None argument clears the GROUP BY."""
        if len(clauses) == 1 and clauses[0] is None:
            self._group_by_clause = ClauseList()
        else:
            if getattr(self, '_group_by_clause', None) is not None:
                clauses = list(self._group_by_clause) + list(clauses)
            self._group_by_clause = ClauseList(*clauses)
    @property
    def _from_objects(self):
        return [self]
class _ScalarSelect(_Grouping):
    """Wrap a select so that it acts as a single column expression."""
    _from_objects = []

    def __init__(self, element):
        self.element = element
        # the scalar expression takes the type of the first inner column
        self.type = list(element.c)[0].type

    @property
    def columns(self):
        raise exc.InvalidRequestError("Scalar Select expression has no columns; "
            "use this object directly within a column-level expression.")
    c = columns

    def self_group(self, **kwargs):
        # a scalar select is already parenthesized; no extra grouping
        return self

    def _make_proxy(self, selectable, name):
        inner = list(self.inner_columns)
        return inner[0]._make_proxy(selectable, name)
class CompoundSelect(_SelectBaseMixin, FromClause):
    """Represent a set operation (UNION, INTERSECT, EXCEPT) of selectables.

    All member selects must export the same number of columns.
    """
    __visit_name__ = 'compound_select'
    def __init__(self, keyword, *selects, **kwargs):
        self._should_correlate = kwargs.pop('correlate', False)
        self.keyword = keyword
        self.selects = []
        numcols = None
        # some DBs do not like ORDER BY in the inner queries of a UNION, etc.
        for n, s in enumerate(selects):
            s = _clause_element_as_expr(s)
            if not numcols:
                numcols = len(s.c)
            elif len(s.c) != numcols:
                raise exc.ArgumentError(
                        "All selectables passed to CompoundSelect must "
                        "have identical numbers of columns; select #%d has %d columns,"
                        " select #%d has %d" %
                        (1, len(self.selects[0].c), n+1, len(s.c))
                )
            self.selects.append(s.self_group(self))
        _SelectBaseMixin.__init__(self, **kwargs)
    def self_group(self, against=None):
        return _FromGrouping(self)
    def is_derived_from(self, fromclause):
        # derived if any member select derives from fromclause
        for s in self.selects:
            if s.is_derived_from(fromclause):
                return True
        return False
    def _populate_column_collection(self):
        for cols in zip(*[s.c for s in self.selects]):
            # this is a slightly hacky thing - the union exports a column that
            # resembles just that of the *first* selectable. to get at a "composite" column,
            # particularly foreign keys, you have to dig through the proxies collection
            # which we generate below. We may want to improve upon this,
            # such as perhaps _make_proxy can accept a list of other columns that
            # are "shared" - schema.column can then copy all the ForeignKeys in.
            # this would allow the union() to have all those fks too.
            proxy = cols[0]._make_proxy(
                self, name=self.use_labels and cols[0]._label or None)
            # hand-construct the "proxies" collection to include all derived columns
            # place a 'weight' annotation corresponding to how low in the list of
            # select()s the column occurs, so that the corresponding_column() operation
            # can resolve conflicts
            proxy.proxies = [c._annotate({'weight':i + 1}) for i, c in enumerate(cols)]
    def _copy_internals(self, clone=_clone):
        self._reset_exported()
        self.selects = [clone(s) for s in self.selects]
        if hasattr(self, '_col_map'):
            del self._col_map
        for attr in ('_order_by_clause', '_group_by_clause'):
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr)))
    def get_children(self, column_collections=True, **kwargs):
        return (column_collections and list(self.c) or []) + \
            [self._order_by_clause, self._group_by_clause] + list(self.selects)
    def bind(self):
        # explicit bind wins; otherwise first member select with a bind
        if self._bind:
            return self._bind
        for s in self.selects:
            e = s.bind
            if e:
                return e
        else:
            return None
    def _set_bind(self, bind):
        self._bind = bind
    bind = property(bind, _set_bind)
class Select(_SelectBaseMixin, FromClause):
    """Represent a SELECT statement."""
    __visit_name__ = 'select'
    _prefixes = ()
    def __init__(self,
                columns,
                whereclause=None,
                from_obj=None,
                distinct=False,
                having=None,
                correlate=True,
                prefixes=None,
                **kwargs):
        """Construct a Select.

        FROM elements are derived from the given columns and whereclause,
        and augmented by ``from_obj``.
        """
        self._should_correlate = correlate
        self._distinct = distinct
        self._correlate = set()
        self._froms = util.OrderedSet()
        try:
            cols_present = bool(columns)
        except TypeError:
            raise exc.ArgumentError("columns argument to select() must "
                "be a Python list or other iterable")
        if cols_present:
            self._raw_columns = []
            for c in columns:
                c = _literal_as_column(c)
                # group scalar selects so they render parenthesized in the
                # columns clause
                if isinstance(c, _ScalarSelect):
                    c = c.self_group(against=operators.comma_op)
                self._raw_columns.append(c)
            self._froms.update(_from_objects(*self._raw_columns))
        else:
            self._raw_columns = []
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
            self._froms.update(_from_objects(self._whereclause))
        else:
            self._whereclause = None
        if from_obj is not None:
            for f in util.to_list(from_obj):
                if _is_literal(f):
                    self._froms.add(_TextClause(f))
                else:
                    self._froms.add(f)
        if having is not None:
            self._having = _literal_as_text(having)
        else:
            self._having = None
        if prefixes:
            self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
        _SelectBaseMixin.__init__(self, **kwargs)
    def _get_display_froms(self, existing_froms=None):
        """Return the FROM list to render, after hiding elements contained
        in joins and removing correlated froms."""
        froms = self._froms
        toremove = itertools.chain(*[f._hide_froms for f in froms])
        if toremove:
            froms = froms.difference(toremove)
        if len(froms) > 1 or self._correlate:
            if self._correlate:
                froms = froms.difference(_cloned_intersection(froms, self._correlate))
            if self._should_correlate and existing_froms:
                froms = froms.difference(_cloned_intersection(froms, existing_froms))
            if not len(froms):
                raise exc.InvalidRequestError(
                        "Select statement '%s' returned no FROM clauses "
                        "due to auto-correlation; specify correlate(<tables>) "
                        "to control correlation manually." % self)
        return froms
    @property
    def froms(self):
        """The displayed FROM elements of this select."""
        return self._get_display_froms()
    @property
    def type(self):
        raise exc.InvalidRequestError("Select objects don't have a type. "
                    "Call as_scalar() on this Select object "
                    "to return a 'scalar' version of this Select.")
    @util.memoized_instancemethod
    def locate_all_froms(self):
        """Return all FROM clauses, including those nested inside others."""
        return self._froms.union(_from_objects(*list(self._froms)))
    @property
    def inner_columns(self):
        return _select_iterables(self._raw_columns)
    def is_derived_from(self, fromclause):
        if self in fromclause._cloned_set:
            return True
        for f in self.locate_all_froms():
            if f.is_derived_from(fromclause):
                return True
        return False
    def _copy_internals(self, clone=_clone):
        # clone froms and correlate via a shared mapping, so the same
        # original maps to the same clone in both collections
        self._reset_exported()
        from_cloned = dict((f, clone(f))
                    for f in self._froms.union(self._correlate))
        self._froms = util.OrderedSet(from_cloned[f] for f in self._froms)
        self._correlate = set(from_cloned[f] for f in self._correlate)
        self._raw_columns = [clone(c) for c in self._raw_columns]
        for attr in ('_whereclause', '_having', '_order_by_clause', '_group_by_clause'):
            if getattr(self, attr) is not None:
                setattr(self, attr, clone(getattr(self, attr)))
    def get_children(self, column_collections=True, **kwargs):
        return (column_collections and list(self.columns) or []) + \
            self._raw_columns + list(self._froms) + \
            [x for x in
                (self._whereclause, self._having,
                    self._order_by_clause, self._group_by_clause)
            if x is not None]
    @_generative
    def column(self, column):
        """Generatively add a column expression to the columns clause."""
        column = _literal_as_column(column)
        if isinstance(column, _ScalarSelect):
            column = column.self_group(against=operators.comma_op)
        self._raw_columns = self._raw_columns + [column]
        self._froms = self._froms.union(_from_objects(column))
    @_generative
    def with_only_columns(self, columns):
        """Generatively replace the columns clause entirely."""
        self._raw_columns = [
                isinstance(c, _ScalarSelect) and
                c.self_group(against=operators.comma_op) or c
                for c in [_literal_as_column(c) for c in columns]
            ]
    @_generative
    def where(self, whereclause):
        """Generatively AND a criterion into the WHERE clause."""
        self.append_whereclause(whereclause)
    @_generative
    def having(self, having):
        """Generatively AND a criterion into the HAVING clause."""
        self.append_having(having)
    @_generative
    def distinct(self):
        """Generatively apply DISTINCT."""
        self._distinct = True
    @_generative
    def prefix_with(self, clause):
        """Generatively add a keyword/expression after the SELECT keyword."""
        clause = _literal_as_text(clause)
        self._prefixes = self._prefixes + (clause,)
    @_generative
    def select_from(self, fromclause):
        """Generatively add a FROM element."""
        fromclause = _literal_as_text(fromclause)
        self._froms = self._froms.union([fromclause])
    @_generative
    def correlate(self, *fromclauses):
        """Generatively set the FROM elements to correlate against an
        enclosing select; ``correlate(None)`` disables correlation."""
        self._should_correlate = False
        if fromclauses == (None,):
            self._correlate = set()
        else:
            self._correlate = self._correlate.union(fromclauses)
    def append_correlation(self, fromclause):
        """In-place version of correlate()."""
        self._should_correlate = False
        self._correlate = self._correlate.union([fromclause])
    def append_column(self, column):
        """In-place version of column()."""
        column = _literal_as_column(column)
        if isinstance(column, _ScalarSelect):
            column = column.self_group(against=operators.comma_op)
        self._raw_columns = self._raw_columns + [column]
        self._froms = self._froms.union(_from_objects(column))
        self._reset_exported()
    def append_prefix(self, clause):
        """In-place version of prefix_with()."""
        clause = _literal_as_text(clause)
        self._prefixes = self._prefixes + (clause,)
    def append_whereclause(self, whereclause):
        """In-place version of where(); new criteria are AND-ed in."""
        whereclause = _literal_as_text(whereclause)
        self._froms = self._froms.union(_from_objects(whereclause))
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause, whereclause)
        else:
            self._whereclause = whereclause
    def append_having(self, having):
        """In-place version of having(); new criteria are AND-ed in."""
        if self._having is not None:
            self._having = and_(self._having, _literal_as_text(having))
        else:
            self._having = _literal_as_text(having)
    def append_from(self, fromclause):
        """In-place version of select_from()."""
        if _is_literal(fromclause):
            fromclause = _TextClause(fromclause)
        self._froms = self._froms.union([fromclause])
    def __exportable_columns(self):
        # flatten selectables into their individual columns; skip
        # anything that is neither a selectable nor a column element
        for column in self._raw_columns:
            if isinstance(column, Selectable):
                for co in column.columns:
                    yield co
            elif isinstance(column, ColumnElement):
                yield column
            else:
                continue
    def _populate_column_collection(self):
        for c in self.__exportable_columns():
            c._make_proxy(self, name=self.use_labels and c._label or None)
    def self_group(self, against=None):
        # no extra grouping needed when this select is a component of a
        # compound select (UNION etc.)
        if isinstance(against, CompoundSelect):
            return self
        return _FromGrouping(self)
    def union(self, other, **kwargs):
        """Return a UNION of this select and ``other``."""
        return union(self, other, **kwargs)
    def union_all(self, other, **kwargs):
        """Return a UNION ALL of this select and ``other``."""
        return union_all(self, other, **kwargs)
    def except_(self, other, **kwargs):
        """Return an EXCEPT of this select and ``other``."""
        return except_(self, other, **kwargs)
    def except_all(self, other, **kwargs):
        """Return an EXCEPT ALL of this select and ``other``."""
        return except_all(self, other, **kwargs)
    def intersect(self, other, **kwargs):
        """Return an INTERSECT of this select and ``other``."""
        return intersect(self, other, **kwargs)
    def intersect_all(self, other, **kwargs):
        """Return an INTERSECT ALL of this select and ``other``."""
        return intersect_all(self, other, **kwargs)
    def bind(self):
        # search explicit bind, then (when no FROMs) column binds, then
        # the first FROM element's bind; cache any hit on self._bind
        if self._bind:
            return self._bind
        if not self._froms:
            for c in self._raw_columns:
                e = c.bind
                if e:
                    self._bind = e
                    return e
        else:
            e = list(self._froms)[0].bind
            if e:
                self._bind = e
                return e
        return None
    def _set_bind(self, bind):
        self._bind = bind
    bind = property(bind, _set_bind)
class _UpdateBase(Executable, ClauseElement):
    """Common behavior for INSERT, UPDATE and DELETE statements."""
    __visit_name__ = 'update_base'
    # DML statements autocommit by default
    _execution_options = Executable._execution_options.union({'autocommit':True})
    kwargs = util.frozendict()
    def _process_colparams(self, parameters):
        # a positional list/tuple of values is mapped onto the table's
        # columns in order; anything else (dict or None) passes through
        if isinstance(parameters, (list, tuple)):
            pp = {}
            for i, c in enumerate(self.table.c):
                pp[c.key] = parameters[i]
            return pp
        else:
            return parameters
    def params(self, *arg, **kw):
        raise NotImplementedError(
            "params() is not supported for INSERT/UPDATE/DELETE statements."
            " To set the values for an INSERT or UPDATE statement, use"
            " stmt.values(**parameters).")
    def bind(self):
        # explicit bind, otherwise the target table's bind
        return self._bind or self.table.bind
    def _set_bind(self, bind):
        self._bind = bind
    bind = property(bind, _set_bind)
    # matches the deprecated dialect-specific returning kwargs, e.g.
    # "postgres_returning" / "postgresql_returning" / "firebird_returning"
    _returning_re = re.compile(r'(?:firebird|postgres(?:ql)?)_returning')
    def _process_deprecated_kw(self, kwargs):
        for k in list(kwargs):
            m = self._returning_re.match(k)
            if m:
                self._returning = kwargs.pop(k)
                util.warn_deprecated(
                    "The %r argument is deprecated. Please "
                    "use statement.returning(col1, col2, ...)" % k
                )
        return kwargs
    @_generative
    def returning(self, *cols):
        """Generatively add a RETURNING clause of the given columns."""
        self._returning = cols
class _ValuesBase(_UpdateBase):
    """Mixin for DML statements that accept a VALUES clause
    (INSERT and UPDATE)."""
    __visit_name__ = 'values_base'
    def __init__(self, table, values):
        self.table = table
        self.parameters = self._process_colparams(values)
    @_generative
    def values(self, *args, **kwargs):
        """Generatively set or merge the VALUES for the statement.

        A single positional argument may be a dict, or a positional
        list/tuple mapped onto the table's columns; keyword arguments
        are merged on top.
        """
        if args:
            v = args[0]
        else:
            v = {}
        if self.parameters is None:
            self.parameters = self._process_colparams(v)
            self.parameters.update(kwargs)
        else:
            # copy before updating so the generative parent is untouched
            self.parameters = self.parameters.copy()
            self.parameters.update(self._process_colparams(v))
            self.parameters.update(kwargs)
class Insert(_ValuesBase):
    """Represent an INSERT statement."""
    __visit_name__ = 'insert'
    _prefixes = ()
    def __init__(self,
                table,
                values=None,
                inline=False,
                bind=None,
                prefixes=None,
                returning=None,
                **kwargs):
        _ValuesBase.__init__(self, table, values)
        self._bind = bind
        # presumably set externally to support INSERT..FROM SELECT — confirm
        self.select = None
        self.inline = inline
        self._returning = returning
        if prefixes:
            self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
        if kwargs:
            # remaining kwargs may carry deprecated dialect returning args
            self.kwargs = self._process_deprecated_kw(kwargs)
    def get_children(self, **kwargs):
        if self.select is not None:
            return self.select,
        else:
            return ()
    def _copy_internals(self, clone=_clone):
        # copy parameters so mutation of the clone doesn't affect the original
        self.parameters = self.parameters.copy()
    @_generative
    def prefix_with(self, clause):
        """Generatively add a keyword/expression after the INSERT keyword."""
        clause = _literal_as_text(clause)
        self._prefixes = self._prefixes + (clause,)
class Update(_ValuesBase):
    """Represent an UPDATE statement."""
    __visit_name__ = 'update'
    def __init__(self,
                table,
                whereclause,
                values=None,
                inline=False,
                bind=None,
                returning=None,
                **kwargs):
        _ValuesBase.__init__(self, table, values)
        self._bind = bind
        self._returning = returning
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None
        self.inline = inline
        if kwargs:
            # remaining kwargs may carry deprecated dialect returning args
            self.kwargs = self._process_deprecated_kw(kwargs)
    def get_children(self, **kwargs):
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()
    def _copy_internals(self, clone=_clone):
        self._whereclause = clone(self._whereclause)
        # copy parameters so mutation of the clone doesn't affect the original
        self.parameters = self.parameters.copy()
    @_generative
    def where(self, whereclause):
        """Generatively AND a criterion into the WHERE clause."""
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause, _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)
class Delete(_UpdateBase):
    """Represent a DELETE statement."""
    __visit_name__ = 'delete'
    def __init__(self,
                table,
                whereclause,
                bind=None,
                returning =None,
                **kwargs):
        self._bind = bind
        self.table = table
        self._returning = returning
        if whereclause is not None:
            self._whereclause = _literal_as_text(whereclause)
        else:
            self._whereclause = None
        if kwargs:
            # remaining kwargs may carry deprecated dialect returning args
            self.kwargs = self._process_deprecated_kw(kwargs)
    def get_children(self, **kwargs):
        if self._whereclause is not None:
            return self._whereclause,
        else:
            return ()
    @_generative
    def where(self, whereclause):
        """Generatively AND a criterion into the WHERE clause."""
        if self._whereclause is not None:
            self._whereclause = and_(self._whereclause, _literal_as_text(whereclause))
        else:
            self._whereclause = _literal_as_text(whereclause)
    def _copy_internals(self, clone=_clone):
        self._whereclause = clone(self._whereclause)
class _IdentifiedClause(Executable, ClauseElement):
    """Base for statements identified by a savepoint name."""
    __visit_name__ = 'identified'
    # savepoint statements must not trigger autocommit
    _execution_options = Executable._execution_options.union({'autocommit':False})
    quote = None
    def __init__(self, ident):
        self.ident = ident
class SavepointClause(_IdentifiedClause):
    """Represent a SAVEPOINT statement."""
    __visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
    """Represent a ROLLBACK TO SAVEPOINT statement."""
    __visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
    """Represent a RELEASE SAVEPOINT statement."""
    __visit_name__ = 'release_savepoint'
| true | true |
1c3962023870626bf65e5b10720a0f2283523934 | 617 | py | Python | benchmarks/storage/insertions.py | OneManEquipe/thinkerino | 4b9508156371643f31d1d26608e42e3aafcb0153 | [
"MIT"
] | 1 | 2020-05-02T19:44:18.000Z | 2020-05-02T19:44:18.000Z | benchmarks/storage/insertions.py | OneManEquipe/aitools | 4b9508156371643f31d1d26608e42e3aafcb0153 | [
"MIT"
] | 29 | 2019-08-07T17:49:03.000Z | 2021-08-31T10:25:00.000Z | benchmarks/storage/insertions.py | OneManEquipe/thinkerino | 4b9508156371643f31d1d26608e42e3aafcb0153 | [
"MIT"
] | null | null | null | from typing import Callable, Iterable
from aitools.logic import LogicObject
from aitools.storage.base import LogicObjectStorage
def leave_storage_empty(
storage: LogicObjectStorage,
initial_distribution: Callable[[], Iterable[LogicObject]]
) -> LogicObjectStorage:
return storage
def make_insert_n_formulas(n):
def insert_n_formulas(storage: LogicObjectStorage, distribution: Callable[[], Iterable[LogicObject]]):
for _, formula in zip(range(n), distribution()):
storage.add(formula)
insert_n_formulas.__name__ = f"insert_{n}_formulas"
return insert_n_formulas
| 32.473684 | 106 | 0.750405 | from typing import Callable, Iterable
from aitools.logic import LogicObject
from aitools.storage.base import LogicObjectStorage
def leave_storage_empty(
storage: LogicObjectStorage,
initial_distribution: Callable[[], Iterable[LogicObject]]
) -> LogicObjectStorage:
return storage
def make_insert_n_formulas(n):
def insert_n_formulas(storage: LogicObjectStorage, distribution: Callable[[], Iterable[LogicObject]]):
for _, formula in zip(range(n), distribution()):
storage.add(formula)
insert_n_formulas.__name__ = f"insert_{n}_formulas"
return insert_n_formulas
| true | true |
1c39636ebf838cf141a618ce92264d7097ceb929 | 1,620 | py | Python | src-py/gen.py | vtad4f/partitioner | 13494a0fd486f2475c788300206171f6d8a7a897 | [
"MIT"
] | null | null | null | src-py/gen.py | vtad4f/partitioner | 13494a0fd486f2475c788300206171f6d8a7a897 | [
"MIT"
] | null | null | null | src-py/gen.py | vtad4f/partitioner | 13494a0fd486f2475c788300206171f6d8a7a897 | [
"MIT"
] | null | null | null |
import random
def Main(fpath, n_vertices, n_edges, v_range=10, e_range=10):
"""
BRIEF Main execution (all but cmd line parsing)
"""
# Create file with single graph
with open(fpath, 'w') as f:
f.write('# t 1\n')
# Add vertices
vertices = range(n_vertices)
for v in vertices:
f.write('v {0} {1}\n'.format(v, random.choice(range(v_range))))
# Add edges
edges = set()
for e in range(n_edges):
success = False
while not success:
this_vertex = e if e < len(vertices) else random.choice(vertices)
other_vertex = random.choice(vertices)
# No cyclic graphs
if this_vertex != other_vertex:
# Don't write the same edge twice
directed = [this_vertex, other_vertex]
undirected = frozenset(directed)
if not undirected in edges:
edges.add(undirected)
success = True
random.shuffle(directed)
f.write('e {0} {1} {2}\n'.format(directed[0], directed[1], random.choice(range(e_range))))
if __name__ == '__main__':
"""
BRIEF Main execution (including cmd line parsing)
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('path')
parser.add_argument('v', type=int)
parser.add_argument('e', type=int)
args = parser.parse_args()
Main(args.path, args.v, args.e)
| 28.928571 | 108 | 0.524691 |
import random
def Main(fpath, n_vertices, n_edges, v_range=10, e_range=10):
with open(fpath, 'w') as f:
f.write('# t 1\n')
vertices = range(n_vertices)
for v in vertices:
f.write('v {0} {1}\n'.format(v, random.choice(range(v_range))))
edges = set()
for e in range(n_edges):
success = False
while not success:
this_vertex = e if e < len(vertices) else random.choice(vertices)
other_vertex = random.choice(vertices)
if this_vertex != other_vertex:
directed = [this_vertex, other_vertex]
undirected = frozenset(directed)
if not undirected in edges:
edges.add(undirected)
success = True
random.shuffle(directed)
f.write('e {0} {1} {2}\n'.format(directed[0], directed[1], random.choice(range(e_range))))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('path')
parser.add_argument('v', type=int)
parser.add_argument('e', type=int)
args = parser.parse_args()
Main(args.path, args.v, args.e)
| true | true |
1c39645a0bc17b9cb1034ee80c11537442818baf | 5,014 | py | Python | bq_du/du.py | solution3o6s/bq-tools | 00e5f6714724bc4034fed8afd26c8eec34d4a560 | [
"Apache-2.0"
] | null | null | null | bq_du/du.py | solution3o6s/bq-tools | 00e5f6714724bc4034fed8afd26c8eec34d4a560 | [
"Apache-2.0"
] | null | null | null | bq_du/du.py | solution3o6s/bq-tools | 00e5f6714724bc4034fed8afd26c8eec34d4a560 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use du file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import argparse
from google.cloud import bigquery
class DuClient(object):
def __init__(self, legacy, bq_table_name):
self.client = bigquery.Client()
self.table_name = '[{}]'.format(bq_table_name) if legacy else '`{}`'.format(bq_table_name)
self.job_config = bigquery.QueryJobConfig(
dry_run=True,
use_legacy_sql=legacy,
use_query_cache=False
)
def du(self, field):
return self.client.query(
query="SELECT {} FROM {}".format(field, self.table_name),
job_config=self.job_config
).total_bytes_processed
def load_schema(schema_path):
if schema_path:
with open(schema_path, 'r') as schemaFile:
return json.loads(schemaFile.read())
def du_field(field_du):
return du_client.du(field_du)
def __travel_fields__(root, traveled_path, traveled_depth, stop_depth, field_path_separator):
if 0 < stop_depth <= traveled_depth:
return
for f in root:
field_name = f.get('name', None)
if not field_name:
continue
field_type = f.get('type', None)
is_field_record = field_type.lower() == "record"
field_path = '{}{}{}'.format(traveled_path, field_path_separator, field_name)
field_size = du_field(
field_path+'.*' if is_field_record else field_path
)
yield field_path, \
traveled_depth if is_field_record else 'L', field_size
if is_field_record:
for p in __travel_fields__(f.get("fields", None), field_path, traveled_depth + 1, stop_depth, '.'):
yield p
def travel_fields(root, stop_depth=-1):
if not isinstance(root, list):
return
return __travel_fields__(
root,
"",
0,
stop_depth,
''
)
def human_readable_size(size):
resp = '{}B'.format(size)
for fmt in [[10, 'KB'], [10, 'MB'], [10, 'GB'], [10, 'TB']]:
size = size >> fmt[0]
if not size:
break
resp = '{}{}'.format(size, fmt[1])
return resp
def csv_size(size):
return '{}'.format(size)
def raw_size(size):
return '{}B'.format(size)
def raw_output_formatter(out, options):
fmt = raw_size
pad = BYTES_PADDING
if 'h' in options:
pad = HUMAN_PADDING
fmt = human_readable_size
for field, _, size in out:
print('{}\t{}'.format(fmt(size).rjust(pad), field))
def csv_output_formatter(out, options):
    """Print a CSV report (header row first); 'h' selects human-readable sizes."""
    fmt = human_readable_size if 'h' in options else csv_size
    print('field,level,size')
    for field, level, size in out:
        print('{},{},{}'.format(field, level, fmt(size)))
# Dispatch table from --format values to printer implementations.
OUTPUT_FORMATTERS = {
    'csv': csv_output_formatter,
    'raw': raw_output_formatter,
}
args_parser = argparse.ArgumentParser()
BYTES_PADDING = 15  # right-justify width for raw byte counts
HUMAN_PADDING = 5  # right-justify width for human-readable sizes
table_name = ''  # NOTE(review): appears unused; args.table_name is read instead
if __name__ == "__main__":
    # BUG FIX: each flag was previously listed twice in the same call
    # (e.g. '--table_name', '--table_name'); one option string suffices.
    args_parser.add_argument(
        '--table_name', help='BQ table name clause.'
    )
    args_parser.add_argument(
        '--schema', required=True, help='Path to a BQ schema file.'
    )
    # BUG FIX: without type=int a CLI-supplied depth arrived as a string and
    # broke the `0 < stop_depth <= traveled_depth` comparison in Python 3.
    args_parser.add_argument(
        '-d', '--depth', type=int, default=-1,
        help='Display an entry for all fields depth records deep.'
    )
    # BUG FIX: restricting --format to the known formatters yields a clear
    # argparse error instead of crashing on a None formatter later.
    args_parser.add_argument(
        '--format', default='raw', choices=sorted(OUTPUT_FORMATTERS),
        help='Output format. Either csv or raw. Default is raw.'
    )
    args_parser.add_argument(
        '--use_legacy_sql', action='store_true', default=False,
        help='Use legacy SQL. Default is false.'
    )
    args_parser.add_argument(
        '--human_readable', action='store_true', default=False,
        help='"Human-readable" output.'
    )
    args = args_parser.parse_args()
    bq_schema = None
    try:
        bq_schema = load_schema(args.schema)
    except Exception as e:
        bq_schema = None
        print('Failed to load BQ schema. Error: {}'.format(e))
    if not bq_schema:
        print('Empty BQ schema. Exiting ...')
        exit(1)
    if "fields" not in bq_schema:
        print('Invalid BQ schema. Missing fields. Exiting ...')
        exit(1)
    # Global consumed by du_field().
    du_client = DuClient(
        args.use_legacy_sql,
        args.table_name
    )
    opts = ['h'] if args.human_readable else []
    fields_stats = travel_fields(
        bq_schema.get("fields"),
        args.depth
    )
    OUTPUT_FORMATTERS[args.format](
        fields_stats,
        opts
    )
| 25.451777 | 115 | 0.623454 |
import json
import argparse
from google.cloud import bigquery
class DuClient(object):
def __init__(self, legacy, bq_table_name):
self.client = bigquery.Client()
self.table_name = '[{}]'.format(bq_table_name) if legacy else '`{}`'.format(bq_table_name)
self.job_config = bigquery.QueryJobConfig(
dry_run=True,
use_legacy_sql=legacy,
use_query_cache=False
)
def du(self, field):
return self.client.query(
query="SELECT {} FROM {}".format(field, self.table_name),
job_config=self.job_config
).total_bytes_processed
def load_schema(schema_path):
if schema_path:
with open(schema_path, 'r') as schemaFile:
return json.loads(schemaFile.read())
def du_field(field_du):
return du_client.du(field_du)
def __travel_fields__(root, traveled_path, traveled_depth, stop_depth, field_path_separator):
if 0 < stop_depth <= traveled_depth:
return
for f in root:
field_name = f.get('name', None)
if not field_name:
continue
field_type = f.get('type', None)
is_field_record = field_type.lower() == "record"
field_path = '{}{}{}'.format(traveled_path, field_path_separator, field_name)
field_size = du_field(
field_path+'.*' if is_field_record else field_path
)
yield field_path, \
traveled_depth if is_field_record else 'L', field_size
if is_field_record:
for p in __travel_fields__(f.get("fields", None), field_path, traveled_depth + 1, stop_depth, '.'):
yield p
def travel_fields(root, stop_depth=-1):
if not isinstance(root, list):
return
return __travel_fields__(
root,
"",
0,
stop_depth,
''
)
def human_readable_size(size):
resp = '{}B'.format(size)
for fmt in [[10, 'KB'], [10, 'MB'], [10, 'GB'], [10, 'TB']]:
size = size >> fmt[0]
if not size:
break
resp = '{}{}'.format(size, fmt[1])
return resp
def csv_size(size):
return '{}'.format(size)
def raw_size(size):
return '{}B'.format(size)
def raw_output_formatter(out, options):
fmt = raw_size
pad = BYTES_PADDING
if 'h' in options:
pad = HUMAN_PADDING
fmt = human_readable_size
for field, _, size in out:
print('{}\t{}'.format(fmt(size).rjust(pad), field))
def csv_output_formatter(out, options):
fmt = csv_size
if 'h' in options:
fmt = human_readable_size
print('field,level,size')
for field, level, size in out:
print('{},{},{}'.format(field, level, fmt(size)))
OUTPUT_FORMATTERS = {
'csv': csv_output_formatter,
'raw': raw_output_formatter,
}
args_parser = argparse.ArgumentParser()
BYTES_PADDING = 15
HUMAN_PADDING = 5
table_name = ''
if __name__ == "__main__":
args_parser.add_argument(
'--table_name', '--table_name', help='BQ table name clause.'
)
args_parser.add_argument(
'--schema', '--schema', required=True, help='Path to a BQ schema file.'
)
args_parser.add_argument(
'-d', '--depth', default=-1, help='Display an entry for all fields depth records deep.'
)
args_parser.add_argument(
'--format', '--format', default='raw', help='Output format. Either CSV or raw. Default is raw.'
)
args_parser.add_argument(
'--use_legacy_sql', action='store_true', default=False, help='Use legacy SQL. Default is false.'
)
args_parser.add_argument(
'--human_readable', '--human_readable', action='store_true', default=False, help='"Human-readable" output.'
)
args = args_parser.parse_args()
bq_schema = None
try:
bq_schema = load_schema(args.schema)
except Exception as e:
bq_schema = None
print('Failed to load BQ schema. Error: {}'.format(e))
if not bq_schema:
print('Empty BQ schema. Exiting ...')
exit(1)
if "fields" not in bq_schema:
print('Invalid BQ schema. Missing fields. Exiting ...')
exit(1)
du_client = DuClient(
args.use_legacy_sql,
args.table_name
)
opts = ['h'] \
if args.human_readable else []
fields_stats = travel_fields(
bq_schema.get("fields"),
args.depth
)
OUTPUT_FORMATTERS.get(args.format)(
fields_stats,
opts
)
| true | true |
1c39648bd9b3c766508517dd33c70d81bcea3cf4 | 677 | py | Python | backend/cloud-run-api/app/main.py | tuxedocat/fast-annotation-tool | 2e28e81bf5b383ac033eeae847921d68ed302556 | [
"Apache-2.0"
] | 24 | 2021-06-08T06:20:55.000Z | 2022-03-24T07:27:08.000Z | backend/cloud-run-api/app/main.py | shunyooo/fast-annotation-tool | 0f7b23eb9e664daf33c6ada366ea550996d103b3 | [
"Apache-2.0"
] | null | null | null | backend/cloud-run-api/app/main.py | shunyooo/fast-annotation-tool | 0f7b23eb9e664daf33c6ada366ea550996d103b3 | [
"Apache-2.0"
] | 6 | 2021-06-08T02:39:31.000Z | 2022-01-08T15:10:01.000Z | from fastapi import FastAPI
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException
from app.api.errors.http_error import http_error_handler
from app.api.errors.validation_error import http422_error_handler
from app.api.routes.api import router as api_router
from app.config.api_config import PROJECT_NAME, VERSION, DEBUG
def get_app() -> FastAPI:
    """Build and configure the FastAPI application instance."""
    application = FastAPI(title=PROJECT_NAME, version=VERSION, debug=DEBUG)
    for exc_class, handler in (
        (HTTPException, http_error_handler),
        (RequestValidationError, http422_error_handler),
    ):
        application.add_exception_handler(exc_class, handler)
    application.include_router(api_router)
    return application


# Module-level ASGI application picked up by the server (e.g. uvicorn app.main:app).
app = get_app()
| 29.434783 | 76 | 0.82127 | from fastapi import FastAPI
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException
from app.api.errors.http_error import http_error_handler
from app.api.errors.validation_error import http422_error_handler
from app.api.routes.api import router as api_router
from app.config.api_config import PROJECT_NAME, VERSION, DEBUG
def get_app() -> FastAPI:
app = FastAPI(title=PROJECT_NAME, version=VERSION, debug=DEBUG)
app.add_exception_handler(HTTPException, http_error_handler)
app.add_exception_handler(RequestValidationError, http422_error_handler)
app.include_router(api_router)
return app
app = get_app()
| true | true |
1c3964ad3ab8088a59258ab382cba7f9e14aea70 | 10,926 | bzl | Python | tools/build_rules/llvm/llvm.bzl | cushon/kythe | 1e1d70062adac6e28a3ecdd2b078784d970ff036 | [
"Apache-2.0"
] | null | null | null | tools/build_rules/llvm/llvm.bzl | cushon/kythe | 1e1d70062adac6e28a3ecdd2b078784d970ff036 | [
"Apache-2.0"
] | null | null | null | tools/build_rules/llvm/llvm.bzl | cushon/kythe | 1e1d70062adac6e28a3ecdd2b078784d970ff036 | [
"Apache-2.0"
] | null | null | null | load("@io_kythe//tools/build_rules/llvm:configure_file.bzl", "configure_file")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@bazel_skylib//lib:collections.bzl", "collections")
def _repo_path(path):
    """Prefix *path* with external/<repo>/ when evaluated outside the main repo."""
    repo = native.repository_name()
    if repo == "@":
        return path
    return paths.join("external", repo[1:], path.lstrip("/"))
def _llvm_build_deps(ctx, name):
    """Map a library name to its LLVM dependency labels.

    Translates between LLVM component names and their CMake spellings
    (e.g. Scalar <-> ScalarOpts) before consulting the configured
    dependency table in ctx._config.llvm_build_deps.
    """
    # TODO(shahms): Do this transformation during generation.
    llvm_to_cmake = {
        "Scalar": "ScalarOpts",
        "IPO": "ipo",
        "ObjCARC": "ObjCARCOpts",
    }
    cmake_to_llvm = dict([(v, k) for k, v in llvm_to_cmake.items()])
    name = _replace_prefix(name, "LLVM", "")
    name = cmake_to_llvm.get(name, name)
    return [
        ":LLVM" + llvm_to_cmake.get(d, d)
        for d in ctx._config.llvm_build_deps.get(name, [])
    ]
def _root_path(ctx):
    """Join the directory-frame stack into the current CMake source path."""
    return paths.join(*[s.path for s in ctx._state]).lstrip("/")
def _join_path(root, path):
    """Join *path* onto *root*; absolute paths are re-rooted at the workspace."""
    if path.startswith("/"):
        return paths.normalize(path.lstrip("/"))  # CMake paths are all "rooted" at the workspace.
    return paths.normalize(paths.join(root.lstrip("/"), path))
def _llvm_headers(root):
    """Glob the include/llvm headers mirroring an LLVM lib/ source directory."""
    root = _replace_prefix(root, "lib/", "include/llvm/")
    return native.glob([_join_path(root, "**/*.*")])
def _replace_prefix(value, prefix, repl):
    """Return *value* with a leading *prefix* swapped for *repl*, if present."""
    if not value.startswith(prefix):
        return value
    return repl + value[len(prefix):]
def _clang_headers(root):
    """Glob the include/clang headers mirroring a Clang lib/ source directory."""
    root = _replace_prefix(root, "tools/clang/lib/", "tools/clang/include/clang/")
    return native.glob([_join_path(root, "**/*.*")])
def _llvm_srcglob(root, additional_header_dirs = []):
    """Glob private headers and generated .inc files under *root* plus extra dirs."""
    patterns = [_join_path(root, "*.h"), _join_path(root, "*.inc")]
    for extra in additional_header_dirs:
        base = _join_path(root, extra)
        patterns += [paths.join(base, "*.h"), paths.join(base, "*.inc")]
    return native.glob(patterns)
def _clang_srcglob(root):
    """Recursively glob private headers and .inc files under *root*."""
    return native.glob([_join_path(root, "**/*.h"), _join_path(root, "**/*.inc")])
def _current(ctx):
    """Return the innermost (current) directory frame on the context stack."""
    return ctx._state[-1]
def _colonize(name):
    """Prefix *name* with ':' unless it already looks like a label (':', '/', '@')."""
    if name.startswith(":") or name.startswith("/") or name.startswith("@"):
        return name
    return ":" + name
def _genfile_name(path):
    """Derive a valid Bazel target name from a file path."""
    name = path.lstrip("/")
    for ch in ["/", ".", "-"]:
        name = name.replace(ch, "_")
    return name
def _group_sections(sections, args, leader = "srcs"):
    """Split a flat CMake-style argument list into (section_name, values) groups.

    Arguments before the first section keyword land in the *leader* group;
    each keyword in *sections* starts a new group named by its lowercase form.
    """
    groups = [(leader, [])]
    for item in args:
        if item in sections:
            groups.append((item.lower(), []))
        else:
            groups[-1][-1].append(item)
    return groups
def _make_kwargs(ctx, name, args = [], sections = [], leader = "srcs"):
    """Build rule keyword args from configured defaults plus sectioned CMake args."""
    kwargs = dict(ctx._config.target_defaults.get(name, {}))
    for section, values in _group_sections(sections, list(args), leader = leader):
        # Drop falsy entries (empty strings from CMake expansion).
        kwargs.setdefault(section, []).extend([v for v in values if v])
    return kwargs
def _configure_file(ctx, src, out, *unused):
    """Emit a configure_file rule mirroring CMake's configure_file().

    No-op when either path is missing; defines vary per platform.
    """
    if not (src and out):
        return
    root = _root_path(ctx)
    configure_file(
        name = _genfile_name(out),
        src = _join_path(root, src),
        out = _join_path(root, out),
        defines = select({
            "//conditions:default": ctx._config.cmake_defines.default,
            "@io_kythe//:darwin": ctx._config.cmake_defines.darwin,
        }),
    )
def _llvm_library(ctx, name, srcs, hdrs = [], deps = [], additional_header_dirs = [], **kwargs):
    """Declare a cc_library for one LLVM component.

    Mirrors CMake's llvm library macro: globs private headers, attaches
    .def files as textual headers, folds in tblgen outputs, and adds
    per-target deps for code under lib/Target/<Arch>/.
    """
    # TODO(shahms): Do something with these
    kwargs.pop("link_libs", None)
    root = _root_path(ctx)
    depends = ([":llvm-c"] + deps +
               kwargs.pop("depends", []) +
               _llvm_build_deps(ctx, name))
    depends = collections.uniq([_colonize(d) for d in depends])
    # X86.def-style files must be textual_hdrs, not compiled sources.
    defs = native.glob([_join_path(root, "*.def")])
    if defs:
        native.cc_library(
            name = name + "_defs",
            textual_hdrs = defs,
            visibility = ["//visibility:private"],
        )
        depends.append(":" + name + "_defs")
    sources = (
        [_join_path(root, s) for s in srcs] +
        _llvm_srcglob(root, additional_header_dirs) +
        _current(ctx).table_outs
    )
    includes = [root]
    if "/Target/" in root:
        # Backend sub-libraries also see headers from the arch's root dir
        # and depend on its CommonTableGen outputs.
        parts = root.split("/")
        target = parts[parts.index("Target") + 1]
        target_root = "/".join(parts[:parts.index("Target") + 2])
        if target_root and target_root != root:
            sources += native.glob([_join_path(target_root, "**/*.h")])
            includes.append(target_root)
        depends.append(":" + target + "CommonTableGen")
        kind = _replace_prefix(name, "LLVM" + target, "")
        target_kind_deps = {
            "Utils": [":LLVMMC", ":LLVMCodeGen"],
            "Info": [":LLVMMC", ":LLVMTarget"],
            "AsmPrinter": [":LLVMTarget", ":LLVMCodeGen"],
        }
        depends += target_kind_deps.get(kind, [])
    native.cc_library(
        name = name,
        srcs = collections.uniq(sources),
        hdrs = _llvm_headers(root) + hdrs,
        deps = depends,
        copts = ["-I$(GENDIR)/{0} -I{0}".format(_repo_path(i)) for i in includes],
        **kwargs
    )
def _add_llvm_library(ctx, name, *args):
    """Handle CMake add_llvm_library(); skips targets not built under Bazel."""
    sections = ["ADDITIONAL_HEADER_DIRS", "LINK_LIBS", "DEPENDS"]
    kwargs = _make_kwargs(ctx, name, list(args), sections)
    if name in ["LLVMHello", "LLVMTestingSupport"]:
        return
    _llvm_library(ctx, name = name, **kwargs)
def _map_llvm_lib(name):
    """Title-case the first character of all-lowercase LLVM component names."""
    if name.islower():
        return name[0].title() + name[1:]
    return name
def _clang_library(
        ctx,
        name,
        srcs,
        deps = [],
        depends = [],
        link_libs = [],
        additional_headers = [],
        llvm_link_components = [],
        **kwargs):
    """Declare a cc_library for one Clang component.

    CMake LINK_LIBS/DEPENDS entries and LLVM_LINK_COMPONENTS are folded
    into Bazel deps; each source subdirectory is exposed via -I flags.
    """
    root = _root_path(ctx)
    deps = list(deps) + [":clang-c"]
    deps.extend([_colonize(d) for d in depends])
    deps.extend([_colonize(l) for l in link_libs])
    deps.extend([":LLVM" + _map_llvm_lib(l) for l in llvm_link_components])
    extra_dirs = collections.uniq([
        _repo_path(_join_path(root, paths.dirname(s)))
        for s in srcs
        if paths.dirname(s)
    ] + [_repo_path(root)])
    native.cc_library(
        name = name,
        srcs = [_join_path(root, s) for s in srcs] + _clang_srcglob(root),
        hdrs = _clang_headers(root) + kwargs.pop("hdrs", []),
        deps = collections.uniq(deps),
        copts = ["-I%s -I$(GENDIR)/%s" % (d, d) for d in extra_dirs],
        **kwargs
    )
def _add_clang_library(ctx, name, *args):
    """Handle CMake add_clang_library(), including LLVM_LINK_COMPONENTS."""
    sections = ["ADDITIONAL_HEADERS", "LINK_LIBS", "DEPENDS"]
    kwargs = _make_kwargs(ctx, name, list(args), sections)
    kwargs["llvm_link_components"] = _current(ctx).vars.get("LLVM_LINK_COMPONENTS", [])
    _clang_library(ctx, name, **kwargs)
def _add_tablegen(ctx, name, tag, *srcs):
    """Handle CMake add_tablegen(): declare a tblgen host cc_binary.

    *tag* (the CMake project tag) is accepted but unused here.
    """
    root = _root_path(ctx)
    kwargs = _make_kwargs(ctx, name, [_join_path(root, s) for s in srcs])
    kwargs["srcs"].extend(_llvm_srcglob(root))
    deps = [
        ":LLVMSupport",
        ":LLVMTableGen",
        ":LLVMMC",
    ] + kwargs.pop("deps", [])
    native.cc_binary(name = name, deps = deps, **kwargs)
def _set_cmake_var(ctx, key, *args):
    """Record a CMake set() call; only whitelisted variables are tracked."""
    if key in ("LLVM_TARGET_DEFINITIONS", "LLVM_LINK_COMPONENTS", "sources"):
        _current(ctx).vars[key] = args
def _llvm_tablegen(ctx, kind, out, *opts):
    """Handle CMake tablegen(): run llvm-tblgen over LLVM_TARGET_DEFINITIONS.

    Registers *out* on the current frame so libraries and
    add_public_tablegen_target() can pick it up. *kind* is unused.
    """
    cur = _current(ctx)
    root = _root_path(ctx)
    out = _join_path(root, out)
    src = _join_path(root, cur.vars["LLVM_TARGET_DEFINITIONS"][0])
    cur.table_outs.append(out)
    includes = [root, "include"]
    opts = " ".join(["-I " + _repo_path(i) for i in includes] + list(opts))
    native.genrule(
        name = _genfile_name(out),
        outs = [out],
        srcs = native.glob([
            _join_path(root, "*.td"),  # local_tds
            "include/llvm/**/*.td",  # global_tds
        ]),
        tools = [":llvm-tblgen"],
        cmd = "$(location :llvm-tblgen) %s $(location %s) -o $@" % (opts, src),
    )
def _clang_diag_gen(ctx, comp):
    """Generate Diagnostic<comp>Kinds.inc and its ClangDiagnostic<comp> library."""
    _clang_tablegen(
        ctx,
        "Diagnostic%sKinds.inc" % comp,
        "-gen-clang-diags-defs",
        "-clang-component=" + comp,
        "SOURCE",
        "Diagnostic.td",
        "TARGET",
        "ClangDiagnostic" + comp,
    )
def _clang_tablegen(ctx, out, *args):
    """Handle clang_tablegen(): run clang-tblgen and expose the output.

    *args* follows the CMake form: options, then SOURCE <td>, TARGET <name>,
    and optional -I include dirs. The generated header is tracked on the
    current frame; a TARGET yields a textual_hdrs cc_library.
    """
    root = _root_path(ctx)
    out = _join_path(root, out)
    name = _genfile_name(out)
    kwargs = _make_kwargs(ctx, name, args, ["SOURCE", "TARGET", "-I"], leader = "opts")
    src = _join_path(root, kwargs["source"][0])
    includes = ["include/", root] + [
        _join_path(root, p)
        for p in kwargs.get("-i", [])
    ]
    opts = " ".join(["-I " + _repo_path(i) for i in includes] + kwargs["opts"])
    native.genrule(
        name = name,
        outs = [out],
        srcs = native.glob([
            _join_path(root, "*.td"),  # local_tds
            _join_path(paths.dirname(src), "*.td"),  # local_tds
            "include/llvm/**/*.td",  # global_tds
        ]),
        tools = [":clang-tblgen"],
        cmd = "$(location :clang-tblgen) %s $(location %s) -o $@" % (opts, src),
    )
    _current(ctx).gen_hdrs.append(out)
    target = kwargs.get("target")
    if target:
        native.cc_library(name = target[0], textual_hdrs = [out])
def _add_public_tablegen_target(ctx, name):
    """Wrap the current frame's tblgen outputs in a textual-headers cc_library."""
    table_outs = _current(ctx).table_outs
    includes = []
    for out in table_outs:
        include = paths.dirname(out)
        # Only directories under an include/ root become -I paths.
        if include not in includes and "include" in include:
            includes.append(include)
    native.cc_library(
        name = name,
        textual_hdrs = _current(ctx).table_outs + ctx._config.target_defaults.get(name, {}).get("textual_hdrs", []),
        includes = includes,
    )
def _add_llvm_target(ctx, name, *args):
    """Handle add_llvm_target(): LLVM<name> library from tracked plus given sources."""
    sources = list(_current(ctx).vars.get("sources", []))
    sources.extend(args)
    _add_llvm_library(ctx, "LLVM" + name, *sources)
def _enter_directory(ctx, path):
    """Push a fresh frame for *path* onto the directory stack; returns ctx."""
    ctx._state.append(struct(
        path = path,
        vars = {},
        table_outs = [],
        gen_hdrs = [],
    ))
    return ctx
def _exit_directory(ctx, path):
    """Pop the current directory frame, publishing generated headers as a filegroup."""
    gen_hdrs = _current(ctx).gen_hdrs
    if gen_hdrs:
        native.filegroup(
            name = _genfile_name(_root_path(ctx)) + "_genhdrs",
            srcs = gen_hdrs,
        )
    ctx._state.pop()
    return ctx
def make_context(**kwargs):
    """Create the CMake-emulation context consumed by generated BUILD code.

    *kwargs* become the immutable per-repo configuration (target_defaults,
    llvm_build_deps, cmake_defines, ...); the returned struct exposes one
    entry point per supported CMake command.
    """
    return struct(
        _state = [],
        _config = struct(**kwargs),
        enter_directory = _enter_directory,
        exit_directory = _exit_directory,
        set = _set_cmake_var,
        configure_file = _configure_file,
        add_llvm_library = _add_llvm_library,
        add_llvm_target = _add_llvm_target,
        add_clang_library = _add_clang_library,
        add_tablegen = _add_tablegen,
        tablegen = _llvm_tablegen,
        clang_tablegen = _clang_tablegen,
        clang_diag_gen = _clang_diag_gen,
        add_public_tablegen_target = _add_public_tablegen_target,
    )
| 33.722222 | 116 | 0.59244 | load("@io_kythe//tools/build_rules/llvm:configure_file.bzl", "configure_file")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("@bazel_skylib//lib:collections.bzl", "collections")
def _repo_path(path):
if native.repository_name() == "@":
return path
return paths.join("external", native.repository_name()[1:], path.lstrip("/"))
def _llvm_build_deps(ctx, name):
llvm_to_cmake = {
"Scalar": "ScalarOpts",
"IPO": "ipo",
"ObjCARC": "ObjCARCOpts",
}
cmake_to_llvm = dict([(v, k) for k, v in llvm_to_cmake.items()])
name = _replace_prefix(name, "LLVM", "")
name = cmake_to_llvm.get(name, name)
return [
":LLVM" + llvm_to_cmake.get(d, d)
for d in ctx._config.llvm_build_deps.get(name, [])
]
def _root_path(ctx):
return paths.join(*[s.path for s in ctx._state]).lstrip("/")
def _join_path(root, path):
if path.startswith("/"):
return paths.normalize(path.lstrip("/"))
return paths.normalize(paths.join(root.lstrip("/"), path))
def _llvm_headers(root):
root = _replace_prefix(root, "lib/", "include/llvm/")
return native.glob([_join_path(root, "**/*.*")])
def _replace_prefix(value, prefix, repl):
if value.startswith(prefix):
return repl + value[len(prefix):]
return value
def _clang_headers(root):
root = _replace_prefix(root, "tools/clang/lib/", "tools/clang/include/clang/")
return native.glob([_join_path(root, "**/*.*")])
def _llvm_srcglob(root, additional_header_dirs = []):
srcglob = [_join_path(root, "*.h"), _join_path(root, "*.inc")]
for dir in additional_header_dirs:
srcglob.extend([
paths.join(_join_path(root, dir), "*.h"),
paths.join(_join_path(root, dir), "*.inc"),
])
return native.glob(srcglob)
def _clang_srcglob(root):
return native.glob([_join_path(root, "**/*.h"), _join_path(root, "**/*.inc")])
def _current(ctx):
return ctx._state[-1]
def _colonize(name):
for prefix in [":", "/", "@"]:
if name.startswith(prefix):
return name
return ":" + name
def _genfile_name(path):
return path.lstrip("/").replace("/", "_").replace(".", "_").replace("-", "_")
def _group_sections(sections, args, leader = "srcs"):
blocks = [(leader, [])]
for arg in args:
if arg in sections:
blocks.append((arg.lower(), []))
continue
blocks[-1][-1].append(arg)
return blocks
def _make_kwargs(ctx, name, args = [], sections = [], leader = "srcs"):
kwargs = {}
kwargs.update(ctx._config.target_defaults.get(name, {}))
for key, values in _group_sections(sections, list(args), leader = leader):
kwargs.setdefault(key, []).extend([v for v in values if v])
return kwargs
def _configure_file(ctx, src, out, *unused):
if not (src and out):
return
root = _root_path(ctx)
configure_file(
name = _genfile_name(out),
src = _join_path(root, src),
out = _join_path(root, out),
defines = select({
"//conditions:default": ctx._config.cmake_defines.default,
"@io_kythe//:darwin": ctx._config.cmake_defines.darwin,
}),
)
def _llvm_library(ctx, name, srcs, hdrs = [], deps = [], additional_header_dirs = [], **kwargs):
kwargs.pop("link_libs", None)
root = _root_path(ctx)
depends = ([":llvm-c"] + deps +
kwargs.pop("depends", []) +
_llvm_build_deps(ctx, name))
depends = collections.uniq([_colonize(d) for d in depends])
defs = native.glob([_join_path(root, "*.def")])
if defs:
native.cc_library(
name = name + "_defs",
textual_hdrs = defs,
visibility = ["//visibility:private"],
)
depends.append(":" + name + "_defs")
sources = (
[_join_path(root, s) for s in srcs] +
_llvm_srcglob(root, additional_header_dirs) +
_current(ctx).table_outs
)
includes = [root]
if "/Target/" in root:
parts = root.split("/")
target = parts[parts.index("Target") + 1]
target_root = "/".join(parts[:parts.index("Target") + 2])
if target_root and target_root != root:
sources += native.glob([_join_path(target_root, "**/*.h")])
includes.append(target_root)
depends.append(":" + target + "CommonTableGen")
kind = _replace_prefix(name, "LLVM" + target, "")
target_kind_deps = {
"Utils": [":LLVMMC", ":LLVMCodeGen"],
"Info": [":LLVMMC", ":LLVMTarget"],
"AsmPrinter": [":LLVMTarget", ":LLVMCodeGen"],
}
depends += target_kind_deps.get(kind, [])
native.cc_library(
name = name,
srcs = collections.uniq(sources),
hdrs = _llvm_headers(root) + hdrs,
deps = depends,
copts = ["-I$(GENDIR)/{0} -I{0}".format(_repo_path(i)) for i in includes],
**kwargs
)
def _add_llvm_library(ctx, name, *args):
sections = ["ADDITIONAL_HEADER_DIRS", "LINK_LIBS", "DEPENDS"]
kwargs = _make_kwargs(ctx, name, list(args), sections)
if name in ["LLVMHello", "LLVMTestingSupport"]:
return
_llvm_library(ctx, name = name, **kwargs)
def _map_llvm_lib(name):
if name.islower():
return name[0].title() + name[1:]
return name
def _clang_library(
ctx,
name,
srcs,
deps = [],
depends = [],
link_libs = [],
additional_headers = [],
llvm_link_components = [],
**kwargs):
root = _root_path(ctx)
deps = list(deps) + [":clang-c"]
deps.extend([_colonize(d) for d in depends])
deps.extend([_colonize(l) for l in link_libs])
deps.extend([":LLVM" + _map_llvm_lib(l) for l in llvm_link_components])
extra_dirs = collections.uniq([
_repo_path(_join_path(root, paths.dirname(s)))
for s in srcs
if paths.dirname(s)
] + [_repo_path(root)])
native.cc_library(
name = name,
srcs = [_join_path(root, s) for s in srcs] + _clang_srcglob(root),
hdrs = _clang_headers(root) + kwargs.pop("hdrs", []),
deps = collections.uniq(deps),
copts = ["-I%s -I$(GENDIR)/%s" % (d, d) for d in extra_dirs],
**kwargs
)
def _add_clang_library(ctx, name, *args):
sections = ["ADDITIONAL_HEADERS", "LINK_LIBS", "DEPENDS"]
kwargs = _make_kwargs(ctx, name, list(args), sections)
kwargs["llvm_link_components"] = _current(ctx).vars.get("LLVM_LINK_COMPONENTS", [])
_clang_library(ctx, name, **kwargs)
def _add_tablegen(ctx, name, tag, *srcs):
root = _root_path(ctx)
kwargs = _make_kwargs(ctx, name, [_join_path(root, s) for s in srcs])
kwargs["srcs"].extend(_llvm_srcglob(root))
deps = [
":LLVMSupport",
":LLVMTableGen",
":LLVMMC",
] + kwargs.pop("deps", [])
native.cc_binary(name = name, deps = deps, **kwargs)
def _set_cmake_var(ctx, key, *args):
if key in ("LLVM_TARGET_DEFINITIONS", "LLVM_LINK_COMPONENTS", "sources"):
_current(ctx).vars[key] = args
def _llvm_tablegen(ctx, kind, out, *opts):
cur = _current(ctx)
root = _root_path(ctx)
out = _join_path(root, out)
src = _join_path(root, cur.vars["LLVM_TARGET_DEFINITIONS"][0])
cur.table_outs.append(out)
includes = [root, "include"]
opts = " ".join(["-I " + _repo_path(i) for i in includes] + list(opts))
native.genrule(
name = _genfile_name(out),
outs = [out],
srcs = native.glob([
_join_path(root, "*.td"),
"include/llvm/**/*.td",
]),
tools = [":llvm-tblgen"],
cmd = "$(location :llvm-tblgen) %s $(location %s) -o $@" % (opts, src),
)
def _clang_diag_gen(ctx, comp):
_clang_tablegen(
ctx,
"Diagnostic%sKinds.inc" % comp,
"-gen-clang-diags-defs",
"-clang-component=" + comp,
"SOURCE",
"Diagnostic.td",
"TARGET",
"ClangDiagnostic" + comp,
)
def _clang_tablegen(ctx, out, *args):
root = _root_path(ctx)
out = _join_path(root, out)
name = _genfile_name(out)
kwargs = _make_kwargs(ctx, name, args, ["SOURCE", "TARGET", "-I"], leader = "opts")
src = _join_path(root, kwargs["source"][0])
includes = ["include/", root] + [
_join_path(root, p)
for p in kwargs.get("-i", [])
]
opts = " ".join(["-I " + _repo_path(i) for i in includes] + kwargs["opts"])
native.genrule(
name = name,
outs = [out],
srcs = native.glob([
_join_path(root, "*.td"),
_join_path(paths.dirname(src), "*.td"),
"include/llvm/**/*.td",
]),
tools = [":clang-tblgen"],
cmd = "$(location :clang-tblgen) %s $(location %s) -o $@" % (opts, src),
)
_current(ctx).gen_hdrs.append(out)
target = kwargs.get("target")
if target:
native.cc_library(name = target[0], textual_hdrs = [out])
def _add_public_tablegen_target(ctx, name):
table_outs = _current(ctx).table_outs
includes = []
for out in table_outs:
include = paths.dirname(out)
if include not in includes and "include" in include:
includes.append(include)
native.cc_library(
name = name,
textual_hdrs = _current(ctx).table_outs + ctx._config.target_defaults.get(name, {}).get("textual_hdrs", []),
includes = includes,
)
def _add_llvm_target(ctx, name, *args):
sources = list(_current(ctx).vars.get("sources", []))
sources.extend(args)
_add_llvm_library(ctx, "LLVM" + name, *sources)
def _enter_directory(ctx, path):
ctx._state.append(struct(
path = path,
vars = {},
table_outs = [],
gen_hdrs = [],
))
return ctx
def _exit_directory(ctx, path):
gen_hdrs = _current(ctx).gen_hdrs
if gen_hdrs:
native.filegroup(
name = _genfile_name(_root_path(ctx)) + "_genhdrs",
srcs = gen_hdrs,
)
ctx._state.pop()
return ctx
def make_context(**kwargs):
return struct(
_state = [],
_config = struct(**kwargs),
enter_directory = _enter_directory,
exit_directory = _exit_directory,
set = _set_cmake_var,
configure_file = _configure_file,
add_llvm_library = _add_llvm_library,
add_llvm_target = _add_llvm_target,
add_clang_library = _add_clang_library,
add_tablegen = _add_tablegen,
tablegen = _llvm_tablegen,
clang_tablegen = _clang_tablegen,
clang_diag_gen = _clang_diag_gen,
add_public_tablegen_target = _add_public_tablegen_target,
)
| true | true |
1c39654fddeef90c7cae5292359b0417674b4bab | 1,331 | py | Python | lib/utils/blob.py | czy779509408/text-detection-ctpn | e0cf757a33e83ead6dd6330ba47f5053de12c506 | [
"MIT"
] | 2,744 | 2018-04-13T09:51:22.000Z | 2022-03-29T03:07:19.000Z | lib/utils/blob.py | infinitisun/text-detection-ctpn | b94c3af3d5105b5a9ff4d4a00edf92b2d55ee4cf | [
"MIT"
] | 370 | 2018-04-17T05:36:53.000Z | 2022-02-22T02:54:10.000Z | lib/utils/blob.py | infinitisun/text-detection-ctpn | b94c3af3d5105b5a9ff4d4a00edf92b2d55ee4cf | [
"MIT"
] | 1,145 | 2018-04-13T09:52:49.000Z | 2022-03-29T02:21:13.000Z | """Blob helper functions."""
import numpy as np
import cv2
from ..fast_rcnn.config import cfg
def im_list_to_blob(ims):
    """Stack a list of prepared images into one 4-D network input blob.

    Assumes images are already prepared (means subtracted, BGR order, ...).
    Each image is copied into the top-left corner of a zero-padded canvas
    sized to the largest height/width in the batch.
    """
    shapes = np.array([im.shape for im in ims])
    max_h, max_w = shapes.max(axis=0)[:2]
    blob = np.zeros((len(ims), max_h, max_w, 3), dtype=np.float32)
    for idx, im in enumerate(ims):
        h, w = im.shape[:2]
        blob[idx, :h, :w, :] = im
    return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob.

    Args:
        im: HxWxC image array (BGR order expected by the caller).
        pixel_means: per-channel means broadcastable against *im*.
        target_size: desired length of the shorter image side.
        max_size: upper bound for the longer side after scaling.

    Returns:
        (scaled_image, im_scale) where im_scale is the factor applied.
    """
    im = im.astype(np.float32, copy=False)
    im -= pixel_means
    im_shape = im.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    # Prevent the biggest axis from being more than MAX_SIZE
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    if cfg.TRAIN.RANDOM_DOWNSAMPLE:
        # Training-time augmentation: randomly shrink by a factor in [0.6, 1.0).
        r = 0.6 + np.random.rand() * 0.4
        im_scale *= r
    im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
                    interpolation=cv2.INTER_LINEAR)
    return im, im_scale
| 34.128205 | 75 | 0.641623 | import numpy as np
import cv2
from ..fast_rcnn.config import cfg
def im_list_to_blob(ims):
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
if cfg.TRAIN.RANDOM_DOWNSAMPLE:
r = 0.6 + np.random.rand() * 0.4
im_scale *= r
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return im, im_scale
| true | true |
1c3966ae8c19d2b47f51f085ab2346131c2fd72e | 1,679 | py | Python | classification/video/models/multi_column.py | liaojh1998/cross-modal-concept2robot | 2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e | [
"MIT"
] | 4 | 2021-08-04T08:14:36.000Z | 2022-03-14T05:59:46.000Z | classification/video/models/multi_column.py | liaojh1998/cross-modal-concept2robot | 2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e | [
"MIT"
] | null | null | null | classification/video/models/multi_column.py | liaojh1998/cross-modal-concept2robot | 2a00937eb2ac02cbe3d5d5fa0f5868e85d194f6e | [
"MIT"
] | 2 | 2021-08-28T13:19:31.000Z | 2021-09-17T17:48:41.000Z | import torch.nn as nn
import torch as th
class MultiColumn(nn.Module):
    """Multi-column wrapper around a 3D-CNN column.

    Useful when a video is too long for one clip: each clip is run through
    the shared convolutional column, the per-clip features are averaged,
    and the average is classified by a final FC layer.

    Input: list of tensors, each (batch_size, 3, sequence_length, W, H).
    Output: logits of size (batch_size, num_classes); with
    ``get_features=True`` also returns the averaged clip features.
    """

    def __init__(self, num_classes, conv_column, column_units, clf_layers=None):
        super(MultiColumn, self).__init__()
        self.num_classes = num_classes
        self.column_units = column_units
        # conv_column is a factory: called once with the feature width.
        self.conv_column = conv_column(column_units)
        self.clf_layers = clf_layers
        if not self.clf_layers:
            self.clf_layers = th.nn.Sequential(
                nn.Linear(column_units, self.num_classes)
            )

    def forward(self, inputs, get_features=False):
        # Run every clip through the shared column.
        clip_features = [self.conv_column(clip) for clip in inputs]
        # (num_clips, batch, units) -> (batch, num_clips, units)
        stacked = th.stack(clip_features).permute(1, 0, 2)
        summed = th.squeeze(th.sum(stacked, 1), 1)
        averaged = summed / float(len(inputs))
        logits = self.clf_layers(averaged)
        if get_features:
            return logits, averaged
        return logits
if __name__ == "__main__":
    # Smoke test: five clips of shape (batch=1, C=3, T=72, H=84, W=84)
    # through the project's 3D-CNN column (model3D.Model), 174 classes.
    from model3D import Model
    num_classes = 174
    input_tensor = [th.autograd.Variable(th.rand(1,3,72,84,84)) for i in range(5)]
    print(input_tensor[0].size())
    model = MultiColumn(174, Model, 512)
    output = model(input_tensor)
| 31.679245 | 91 | 0.670042 | import torch.nn as nn
import torch as th
class MultiColumn(nn.Module):
def __init__(self, num_classes, conv_column, column_units, clf_layers=None):
super(MultiColumn,self).__init__()
self.num_classes = num_classes
self.column_units = column_units
self.conv_column = conv_column(column_units)
self.clf_layers = clf_layers
if not self.clf_layers:
self.clf_layers = th.nn.Sequential(
nn.Linear(column_units, self.num_classes)
)
def forward(self, inputs, get_features=False):
outputs = []
num_cols = len(inputs)
for idx in range(num_cols):
x = inputs[idx]
x1 = self.conv_column(x)
outputs.append(x1)
outputs = th.stack(outputs).permute(1,0,2)
outputs = th.squeeze(th.sum(outputs, 1), 1)
avg_output = outputs / float(num_cols)
outputs = self.clf_layers(avg_output)
if get_features:
return outputs, avg_output
else:
return outputs
if __name__ == "__main__":
from model3D import Model
num_classes = 174
input_tensor = [th.autograd.Variable(th.rand(1,3,72,84,84)) for i in range(5)]
print(input_tensor[0].size())
model = MultiColumn(174, Model, 512)
output = model(input_tensor)
| true | true |
1c3966d1c3bf00b34857f8cdb57221aed0476bac | 2,833 | py | Python | stellar_sdk/xdr/data_entry.py | MartinThoma/py-stellar-base | 07ab28cde7a7040f2262b224f9af8a3416c0e5ab | [
"Apache-2.0"
] | 341 | 2015-10-06T20:56:19.000Z | 2022-03-23T15:58:54.000Z | stellar_sdk/xdr/data_entry.py | MartinThoma/py-stellar-base | 07ab28cde7a7040f2262b224f9af8a3416c0e5ab | [
"Apache-2.0"
] | 479 | 2015-11-09T18:39:40.000Z | 2022-03-16T06:46:58.000Z | stellar_sdk/xdr/data_entry.py | MartinThoma/py-stellar-base | 07ab28cde7a7040f2262b224f9af8a3416c0e5ab | [
"Apache-2.0"
] | 181 | 2015-10-01T23:00:59.000Z | 2022-03-05T13:42:19.000Z | # This is an automatically generated file.
# DO NOT EDIT or your changes may be overwritten
import base64
from xdrlib import Packer, Unpacker
from .account_id import AccountID
from .data_entry_ext import DataEntryExt
from .data_value import DataValue
from .string64 import String64
__all__ = ["DataEntry"]
class DataEntry:
"""
XDR Source Code
----------------------------------------------------------------
struct DataEntry
{
AccountID accountID; // account this data belongs to
string64 dataName;
DataValue dataValue;
// reserved for future use
union switch (int v)
{
case 0:
void;
}
ext;
};
----------------------------------------------------------------
"""
def __init__(
self,
account_id: AccountID,
data_name: String64,
data_value: DataValue,
ext: DataEntryExt,
) -> None:
self.account_id = account_id
self.data_name = data_name
self.data_value = data_value
self.ext = ext
def pack(self, packer: Packer) -> None:
self.account_id.pack(packer)
self.data_name.pack(packer)
self.data_value.pack(packer)
self.ext.pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "DataEntry":
account_id = AccountID.unpack(unpacker)
data_name = String64.unpack(unpacker)
data_value = DataValue.unpack(unpacker)
ext = DataEntryExt.unpack(unpacker)
return cls(
account_id=account_id,
data_name=data_name,
data_value=data_value,
ext=ext,
)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "DataEntry":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "DataEntry":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.account_id == other.account_id
and self.data_name == other.data_name
and self.data_value == other.data_value
and self.ext == other.ext
)
def __str__(self):
out = [
f"account_id={self.account_id}",
f"data_name={self.data_name}",
f"data_value={self.data_value}",
f"ext={self.ext}",
]
return f"<DataEntry {[', '.join(out)]}>"
| 27.504854 | 68 | 0.56689 |
import base64
from xdrlib import Packer, Unpacker
from .account_id import AccountID
from .data_entry_ext import DataEntryExt
from .data_value import DataValue
from .string64 import String64
__all__ = ["DataEntry"]
class DataEntry:
def __init__(
self,
account_id: AccountID,
data_name: String64,
data_value: DataValue,
ext: DataEntryExt,
) -> None:
self.account_id = account_id
self.data_name = data_name
self.data_value = data_value
self.ext = ext
def pack(self, packer: Packer) -> None:
self.account_id.pack(packer)
self.data_name.pack(packer)
self.data_value.pack(packer)
self.ext.pack(packer)
@classmethod
def unpack(cls, unpacker: Unpacker) -> "DataEntry":
account_id = AccountID.unpack(unpacker)
data_name = String64.unpack(unpacker)
data_value = DataValue.unpack(unpacker)
ext = DataEntryExt.unpack(unpacker)
return cls(
account_id=account_id,
data_name=data_name,
data_value=data_value,
ext=ext,
)
def to_xdr_bytes(self) -> bytes:
packer = Packer()
self.pack(packer)
return packer.get_buffer()
@classmethod
def from_xdr_bytes(cls, xdr: bytes) -> "DataEntry":
unpacker = Unpacker(xdr)
return cls.unpack(unpacker)
def to_xdr(self) -> str:
xdr_bytes = self.to_xdr_bytes()
return base64.b64encode(xdr_bytes).decode()
@classmethod
def from_xdr(cls, xdr: str) -> "DataEntry":
xdr_bytes = base64.b64decode(xdr.encode())
return cls.from_xdr_bytes(xdr_bytes)
def __eq__(self, other: object):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.account_id == other.account_id
and self.data_name == other.data_name
and self.data_value == other.data_value
and self.ext == other.ext
)
def __str__(self):
out = [
f"account_id={self.account_id}",
f"data_name={self.data_name}",
f"data_value={self.data_value}",
f"ext={self.ext}",
]
return f"<DataEntry {[', '.join(out)]}>"
| true | true |
1c3966eb786a38df5ac338b4127d1c9e4ab97ea4 | 2,016 | py | Python | modmail/__init__.py | Salvi0/modmail-2 | d47e09784d6c9c063ec4083a1a4f40ecb275bdab | [
"MIT"
] | 23 | 2021-05-17T06:07:55.000Z | 2021-05-23T00:57:39.000Z | modmail/__init__.py | Salvi0/modmail-2 | d47e09784d6c9c063ec4083a1a4f40ecb275bdab | [
"MIT"
] | 19 | 2021-11-01T07:21:11.000Z | 2022-01-14T04:59:34.000Z | modmail/__init__.py | Salvi0/modmail-2 | d47e09784d6c9c063ec4083a1a4f40ecb275bdab | [
"MIT"
] | null | null | null | import asyncio
import logging
import logging.handlers
import os
from pathlib import Path
import coloredlogs
from modmail.log import ModmailLogger
# On windows aiodns's asyncio support relies on APIs like add_reader (which aiodns uses)
# are not guaranteed to be available, and in particular are not available when using the
# ProactorEventLoop on Windows, this method is only supported with Windows SelectorEventLoop
if os.name == "nt":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
logging.TRACE = 5
logging.NOTICE = 25
logging.addLevelName(logging.TRACE, "TRACE")
logging.addLevelName(logging.NOTICE, "NOTICE")
LOG_FILE_SIZE = 8 * (2 ** 10) ** 2 # 8MB, discord upload limit
# this logging level is set to logging.TRACE because if it is not set to the lowest level,
# the child level will be limited to the lowest level this is set to.
ROOT_LOG_LEVEL = logging.TRACE
FMT = "%(asctime)s %(levelname)10s %(name)15s - [%(lineno)5d]: %(message)s"
DATEFMT = "%Y/%m/%d %H:%M:%S"
logging.setLoggerClass(ModmailLogger)
# Set up file logging
log_file = Path("logs", "bot.log")
log_file.parent.mkdir(parents=True, exist_ok=True)
# file handler
file_handler = logging.handlers.RotatingFileHandler(
log_file,
maxBytes=LOG_FILE_SIZE,
backupCount=7,
encoding="utf-8",
)
file_handler.setFormatter(
logging.Formatter(
fmt=FMT,
datefmt=DATEFMT,
)
)
file_handler.setLevel(logging.TRACE)
coloredlogs.DEFAULT_LEVEL_STYLES["trace"] = coloredlogs.DEFAULT_LEVEL_STYLES["spam"]
coloredlogs.install(level=logging.TRACE, fmt=FMT, datefmt=DATEFMT)
# Create root logger
root: ModmailLogger = logging.getLogger()
root.setLevel(ROOT_LOG_LEVEL)
root.addHandler(file_handler)
# Silence irrelevant loggers
logging.getLogger("discord").setLevel(logging.WARNING)
logging.getLogger("websockets").setLevel(logging.ERROR)
# Set asyncio logging back to the default of INFO even if asyncio's debug mode is enabled.
logging.getLogger("asyncio").setLevel(logging.INFO)
| 29.217391 | 92 | 0.764881 | import asyncio
import logging
import logging.handlers
import os
from pathlib import Path
import coloredlogs
from modmail.log import ModmailLogger
# are not guaranteed to be available, and in particular are not available when using the
# ProactorEventLoop on Windows, this method is only supported with Windows SelectorEventLoop
if os.name == "nt":
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
logging.TRACE = 5
logging.NOTICE = 25
logging.addLevelName(logging.TRACE, "TRACE")
logging.addLevelName(logging.NOTICE, "NOTICE")
LOG_FILE_SIZE = 8 * (2 ** 10) ** 2 # 8MB, discord upload limit
# this logging level is set to logging.TRACE because if it is not set to the lowest level,
# the child level will be limited to the lowest level this is set to.
ROOT_LOG_LEVEL = logging.TRACE
FMT = "%(asctime)s %(levelname)10s %(name)15s - [%(lineno)5d]: %(message)s"
DATEFMT = "%Y/%m/%d %H:%M:%S"
logging.setLoggerClass(ModmailLogger)
# Set up file logging
log_file = Path("logs", "bot.log")
log_file.parent.mkdir(parents=True, exist_ok=True)
# file handler
file_handler = logging.handlers.RotatingFileHandler(
log_file,
maxBytes=LOG_FILE_SIZE,
backupCount=7,
encoding="utf-8",
)
file_handler.setFormatter(
logging.Formatter(
fmt=FMT,
datefmt=DATEFMT,
)
)
file_handler.setLevel(logging.TRACE)
coloredlogs.DEFAULT_LEVEL_STYLES["trace"] = coloredlogs.DEFAULT_LEVEL_STYLES["spam"]
coloredlogs.install(level=logging.TRACE, fmt=FMT, datefmt=DATEFMT)
# Create root logger
root: ModmailLogger = logging.getLogger()
root.setLevel(ROOT_LOG_LEVEL)
root.addHandler(file_handler)
# Silence irrelevant loggers
logging.getLogger("discord").setLevel(logging.WARNING)
logging.getLogger("websockets").setLevel(logging.ERROR)
# Set asyncio logging back to the default of INFO even if asyncio's debug mode is enabled.
logging.getLogger("asyncio").setLevel(logging.INFO)
| true | true |
1c3966f40eff3e4c45413f5ea232dbbe2997a3ca | 879 | py | Python | petidlookup/__main__.py | puremourning/petidlookup | d436895146cdc76325d48e417b286acd50f787d7 | [
"Apache-2.0"
] | 1 | 2020-08-08T07:56:32.000Z | 2020-08-08T07:56:32.000Z | petidlookup/__main__.py | puremourning/petidlookup | d436895146cdc76325d48e417b286acd50f787d7 | [
"Apache-2.0"
] | null | null | null | petidlookup/__main__.py | puremourning/petidlookup | d436895146cdc76325d48e417b286acd50f787d7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Ben Jackson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import lookup
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument( '--file', '-f', type=str, help='Input file name' )
parser.add_argument( '--outfile', '-o', type=str, help='Output file name' )
args = parser.parse_args()
lookup.run( args )
| 35.16 | 77 | 0.734926 |
from . import lookup
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument( '--file', '-f', type=str, help='Input file name' )
parser.add_argument( '--outfile', '-o', type=str, help='Output file name' )
args = parser.parse_args()
lookup.run( args )
| true | true |
1c3967230e860dd0c7209ef55cc6ca22ad497729 | 160 | py | Python | food/urls.py | evangelos1990/firsttry | c782a58cca636a81da0102e4e4294fd5c9e86d8a | [
"Unlicense"
] | null | null | null | food/urls.py | evangelos1990/firsttry | c782a58cca636a81da0102e4e4294fd5c9e86d8a | [
"Unlicense"
] | 2 | 2020-06-14T12:36:35.000Z | 2021-06-10T23:03:13.000Z | food/urls.py | evangelos1990/firsttry | c782a58cca636a81da0102e4e4294fd5c9e86d8a | [
"Unlicense"
] | null | null | null | # food/urls.py
from django.conf.urls import url
from django.urls import path
from food import views
urlpatterns = [
path('', views.index, name='index'),
] | 17.777778 | 40 | 0.7125 |
from django.conf.urls import url
from django.urls import path
from food import views
urlpatterns = [
path('', views.index, name='index'),
] | true | true |
1c396842a02555f4ec3e88322fd8a6786b41cd9f | 3,508 | py | Python | test/scripts/test_restart.py | fujiehuang/ecto | fea744337aa1fad1397c9a3ba5baa143993cb5eb | [
"BSD-3-Clause"
] | null | null | null | test/scripts/test_restart.py | fujiehuang/ecto | fea744337aa1fad1397c9a3ba5baa143993cb5eb | [
"BSD-3-Clause"
] | null | null | null | test/scripts/test_restart.py | fujiehuang/ecto | fea744337aa1fad1397c9a3ba5baa143993cb5eb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys, ecto, ecto_test
def makeplasm(N):
plasm = ecto.Plasm()
gen = ecto_test.Generate(start=1, step=1)
quitter = ecto_test.QuitAfter(N=N)
plasm.connect(gen[:] >> quitter[:])
return (quitter, gen, plasm)
# def do_one_st(N, j):
# #print "multithreaded test w/ quit after", N
# (gen, plasm) = makeplasm(N)
#
# sched = ecto.Scheduler(plasm)
# for i in range(j):
# sched.execute(niter=N+10)
# # print "="*70
#
# #print "singlethreaded: actual out:", gen.outputs.out, " N:", N
# assert (j*N - 1.0) == gen.outputs.out
# #print "\n" * 5
#
# do_one_st(1,1)
# do_one_st(1,2)
# do_one_st(2,1)
# do_one_st(2,2)
# for N in range(1, 100, 10):
# for loops in range(2, 10, 2):
# do_one_st(N, loops)
def do_one_impl(SchedType, countto, nthreads, niter):
print("*"*80, "\n", SchedType, "test w/ quit after", countto, " nthreads=", nthreads, "niter=", niter)
(quitter, gen, plasm) = makeplasm(countto)
sched = SchedType(plasm)
quitout = countto
for j in range(niter):
sched.execute(niter=countto+100)
print(sched.stats())
print("j:", j)
print("niter:", countto+100)
print("countto:", countto)
print("nthreads:", nthreads)
print("quitter.out: ", quitter.outputs.out)
print("gen.out: ", gen.outputs.out)
print("quitout:", quitout)
assert quitter.outputs.out == quitout
quitout += countto + 1 # because the gen will have fired an unused value
# assert gen.outputs.out == countto + dist
def do_one(countto, nthreads, niter):
for S in [ecto.Scheduler]:
do_one_impl(S, countto, nthreads, niter)
do_one(1, 1, 1)
do_one(1, 2, 1)
do_one(2, 1, 2)
do_one(2, 8, 2)
for i in range(1, 200, 13):
for nthreads in range(2, 10, 2):
for niter in range(2, 10, 2):
do_one(i, nthreads, niter)
| 34.732673 | 106 | 0.670182 |
import sys, ecto, ecto_test
def makeplasm(N):
plasm = ecto.Plasm()
gen = ecto_test.Generate(start=1, step=1)
quitter = ecto_test.QuitAfter(N=N)
plasm.connect(gen[:] >> quitter[:])
return (quitter, gen, plasm)
tto, " nthreads=", nthreads, "niter=", niter)
(quitter, gen, plasm) = makeplasm(countto)
sched = SchedType(plasm)
quitout = countto
for j in range(niter):
sched.execute(niter=countto+100)
print(sched.stats())
print("j:", j)
print("niter:", countto+100)
print("countto:", countto)
print("nthreads:", nthreads)
print("quitter.out: ", quitter.outputs.out)
print("gen.out: ", gen.outputs.out)
print("quitout:", quitout)
assert quitter.outputs.out == quitout
quitout += countto + 1
def do_one(countto, nthreads, niter):
for S in [ecto.Scheduler]:
do_one_impl(S, countto, nthreads, niter)
do_one(1, 1, 1)
do_one(1, 2, 1)
do_one(2, 1, 2)
do_one(2, 8, 2)
for i in range(1, 200, 13):
for nthreads in range(2, 10, 2):
for niter in range(2, 10, 2):
do_one(i, nthreads, niter)
| true | true |
1c396bf46db83c817eec77146fc8d8af2b14f3f1 | 63 | py | Python | learn_python/learn_pytest_and_allure/tests/utils/help_functions.py | yehonadav/yonadav_tutorials | e797fdaeaea4c5d85392f724442645afb9391f15 | [
"Apache-2.0"
] | 2 | 2019-08-04T17:30:53.000Z | 2020-09-21T08:39:55.000Z | learn_python/learn_pytest_and_allure/tests/utils/help_functions.py | yehonadav/yonadav_tutorials | e797fdaeaea4c5d85392f724442645afb9391f15 | [
"Apache-2.0"
] | 5 | 2019-10-31T14:55:58.000Z | 2022-02-26T04:06:39.000Z | learn_python/learn_pytest_and_allure/tests/utils/help_functions.py | yehonadav/yonadav_tutorials | e797fdaeaea4c5d85392f724442645afb9391f15 | [
"Apache-2.0"
] | null | null | null | def f():
raise SystemExit(1)
def inc(x):
return x + 1 | 10.5 | 23 | 0.555556 | def f():
raise SystemExit(1)
def inc(x):
return x + 1 | true | true |
1c396d96ba17524bb112d648221bced5c3b70181 | 1,061 | py | Python | repos/system_upgrade/el7toel8/actors/rpmscanner/actor.py | Jakuje/leapp-repository | 580540f68bd4f89152c28935f775f660c2db0839 | [
"Apache-2.0"
] | null | null | null | repos/system_upgrade/el7toel8/actors/rpmscanner/actor.py | Jakuje/leapp-repository | 580540f68bd4f89152c28935f775f660c2db0839 | [
"Apache-2.0"
] | 1 | 2020-06-15T18:32:21.000Z | 2020-06-15T18:32:21.000Z | repos/system_upgrade/el7toel8/actors/rpmscanner/actor.py | kubco2/leapp-repository | ad98ad91d06d0adfe945566a414c95df862c4172 | [
"Apache-2.0"
] | null | null | null | from leapp.actors import Actor
from leapp.libraries.common.rpms import get_installed_rpms
from leapp.models import InstalledRPM, RPM
from leapp.tags import IPUWorkflowTag, FactsPhaseTag
class RpmScanner(Actor):
"""
Provides data about installed RPM Packages.
After collecting data from RPM query, a message with relevant data will be produced.
"""
name = 'rpm_scanner'
consumes = ()
produces = (InstalledRPM,)
tags = (IPUWorkflowTag, FactsPhaseTag)
def process(self):
output = get_installed_rpms()
result = InstalledRPM()
for entry in output:
entry = entry.strip()
if not entry:
continue
name, version, release, epoch, packager, arch, pgpsig = entry.split('|')
result.items.append(RPM(
name=name,
version=version,
epoch=epoch,
packager=packager,
arch=arch,
release=release,
pgpsig=pgpsig))
self.produce(result)
| 28.675676 | 88 | 0.598492 | from leapp.actors import Actor
from leapp.libraries.common.rpms import get_installed_rpms
from leapp.models import InstalledRPM, RPM
from leapp.tags import IPUWorkflowTag, FactsPhaseTag
class RpmScanner(Actor):
name = 'rpm_scanner'
consumes = ()
produces = (InstalledRPM,)
tags = (IPUWorkflowTag, FactsPhaseTag)
def process(self):
output = get_installed_rpms()
result = InstalledRPM()
for entry in output:
entry = entry.strip()
if not entry:
continue
name, version, release, epoch, packager, arch, pgpsig = entry.split('|')
result.items.append(RPM(
name=name,
version=version,
epoch=epoch,
packager=packager,
arch=arch,
release=release,
pgpsig=pgpsig))
self.produce(result)
| true | true |
1c396dc98cc3b66a53dfb013d2b11bad8dc6be3c | 14,742 | py | Python | simulate/parse/_system_topology.py | charles9li/simulate-openmm | cfc76294dd4b00147769fc83c7673fce5bd499cc | [
"MIT"
] | null | null | null | simulate/parse/_system_topology.py | charles9li/simulate-openmm | cfc76294dd4b00147769fc83c7673fce5bd499cc | [
"MIT"
] | null | null | null | simulate/parse/_system_topology.py | charles9li/simulate-openmm | cfc76294dd4b00147769fc83c7673fce5bd499cc | [
"MIT"
] | null | null | null | """
_system_topology.py: Parses topology information for a system.
Copyright (c) 2020 Charles Li // UCSB, Department of Chemical Engineering
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import absolute_import
__author__ = "Charles Li"
__version__ = "1.0"
import os
from ast import literal_eval
import numpy as np
from simtk.openmm.app import Element, ForceField, NoCutoff, Topology
from simtk.unit import nanometer
from ._options import _Options
from ._system_topology_chain import ChainOptions, BranchedChainOptions
class _TopologyOptions(_Options):
# =========================================================================
_SECTION_NAME = '_Topology'
# =========================================================================
def __init__(self, system_options):
super(_TopologyOptions, self).__init__()
self.system_options = system_options
self._topology = None
# =========================================================================
# Helper functions for parsing input
def _create_filepath(self, filepath):
directory = self.system_options.input_options.directory
return os.path.join(directory, filepath)
# =========================================================================
def topology(self):
return self._topology
def create_system(self, nonbondedMethod=NoCutoff, nonbondedCutoff=1.0*nanometer,
constraints=None, rigidWater=True, implicitSolvent=None,
soluteDielectric=1.0, solventDielectric=78.5,
ewaldErrorTolerance=0.0005, removeCMMotion=True,
hydrogenMass=None):
pass
class AmberTopologyOptions(_TopologyOptions):
_SECTION_NAME = 'AmberTopology'
def __init__(self, system_options):
super(AmberTopologyOptions, self).__init__(system_options)
raise NotImplementedError("'{}' is not supported yet.".format(self._SECTION_NAME))
class GromacsTopologyOptions(_TopologyOptions):
_SECTION_NAME = 'GromacsTopology'
# =========================================================================
def __init__(self, system_options):
super(GromacsTopologyOptions, self).__init__(system_options)
self.topFilename = None
self.groFilename = None
self._gromacs_topology = None
def _create_options(self):
super(GromacsTopologyOptions, self)._create_options()
self._OPTIONS['topFilename'] = self._parse_top_filename
self._OPTIONS['groFilename'] = self._parse_gro_filename
# =========================================================================
def _check_for_incomplete_input(self):
if self.topFilename is None:
self._incomplete_error('topFilename')
if self.groFilename is None:
self._incomplete_error('groFilename')
# =========================================================================
def _parse_top_filename(self, *args):
self.topFilename = self._create_filepath(args[0])
def _parse_gro_filename(self, *args):
self.groFilename = self._create_filepath(args[0])
# =========================================================================
def topology(self):
self._create_gromacs_topology()
return self._gromacs_topology.topology
def _create_gromacs_topology(self):
from parmed import gromacs
if self._gromacs_topology is None:
gro = gromacs.GromacsGroFile.parse(self.groFilename)
self._gromacs_topology = gromacs.GromacsTopologyFile(self.topFilename)
self._gromacs_topology.box = gro.box
def create_system(self, nonbondedMethod=NoCutoff, nonbondedCutoff=1.0*nanometer,
constraints=None, rigidWater=True, implicitSolvent=None,
soluteDielectric=1.0, solventDielectric=78.5,
ewaldErrorTolerance=0.0005, removeCMMotion=True,
hydrogenMass=None):
self._create_gromacs_topology()
return self._gromacs_topology.createSystem(nonbondedMethod=nonbondedMethod,
nonbondedCutoff=nonbondedCutoff,
constraints=constraints, rigidWater=rigidWater, implicitSolvent=implicitSolvent,
soluteDielectric=soluteDielectric, solventDielectric=solventDielectric,
ewaldErrorTolerance=ewaldErrorTolerance, removeCMMotion=removeCMMotion,
hydrogenMass=hydrogenMass)
class DodecaneAcrylateTopologyOptions(_TopologyOptions):
# =========================================================================
_SECTION_NAME = "DodecaneAcrylateTopology"
# =========================================================================
# Paths to forcefield files
data_directory = os.path.join(os.path.dirname(__file__), 'data')
TRAPPEUA_FF_PATH = os.path.join(data_directory, "trappeua-acrylates.xml")
OPLS_AA_PATH = os.path.join(data_directory, "opls_aa.xml")
# =========================================================================
def __init__(self, system_options):
super(DodecaneAcrylateTopologyOptions, self).__init__(system_options)
self.forceField_str = "TraPPE-UA"
self.forceField = ForceField(self.TRAPPEUA_FF_PATH)
self.numDodecane = 0
self.numSqualane = 0
self.dodecaneInstructions = None
self.box_vectors = None
self.chains = []
self.branched_chains = []
self.id_to_sequence = {}
def _create_options(self):
super(DodecaneAcrylateTopologyOptions, self)._create_options()
self._OPTIONS['forceField'] = self._parse_force_field
self._OPTIONS['numDodecane'] = self._parse_num_dodecane
self._OPTIONS['numSqualane'] = self._parse_num_squalane
self._OPTIONS['dodecaneInstructions'] = self._parse_dodecane_instructions
self._OPTIONS['box'] = self._parse_box
def _create_sections(self):
super(DodecaneAcrylateTopologyOptions, self)._create_sections()
self._SECTIONS['chain'] = self._parse_chain
self._SECTIONS['BranchedChain'] = self._parse_branched_chain
# =========================================================================
def _parse_force_field(self, *args):
if args[0] == 'TraPPE-UA':
self.forceField = ForceField(self.TRAPPEUA_FF_PATH)
elif args[0] == 'OPLS-AA':
self.forceField = ForceField(self.OPLS_AA_PATH)
else:
raise ValueError("Invalid force field.")
self.forceField_str = args[0]
def _parse_num_dodecane(self, *args):
self.numDodecane = literal_eval(args[0])
def _parse_num_squalane(self, *args):
self.numSqualane = literal_eval(args[0])
def _parse_dodecane_instructions(self, *args):
self.dodecaneInstructions = [instruction.strip() for instruction in args[0].split('/')]
def _parse_box(self, *args):
a, b, c = args[0].split(' ')
self.box_vectors = np.array([[literal_eval(a), 0.0, 0.0],
[0.0, literal_eval(b), 0.0],
[0.0, 0.0, literal_eval(c)]])*nanometer
def _parse_chain(self, *args):
line_deque = args[1]
chain_options = ChainOptions(self)
chain_options.parse(line_deque.popleft())
self.chains.append(chain_options)
def _parse_branched_chain(self, *args):
line_deque = args[1]
branched_chain_options = BranchedChainOptions(self)
branched_chain_options.parse(line_deque.popleft())
self.branched_chains.append(branched_chain_options)
# =========================================================================
def topology(self):
self._create_dodecane_acrylate_topology()
return self._topology
def _create_dodecane_acrylate_topology(self):
if self._topology is None:
topology = Topology()
if self.box_vectors is not None:
topology.setPeriodicBoxVectors(self.box_vectors)
for chain_option in self.chains:
id_to_sequence = chain_option.add_chain_to_topology(topology)
self.id_to_sequence.update(id_to_sequence)
for branched_chain_option in self.branched_chains:
branched_chain_option.add_chain_to_topology(topology)
for _ in range(self.numDodecane):
dodecane_id = self._add_dodecane_to_topology(topology)
self.id_to_sequence[dodecane_id] = "C12"
for _ in range(self.numSqualane):
squalane_id = self._add_squalane_to_topology(topology)
self.id_to_sequence[squalane_id] = "squalane"
self._topology = topology
def _add_dodecane_to_topology(self, topology):
# Carbon element
carbon_element = Element.getBySymbol('C')
hydrogen_element = Element.getBySymbol('H')
chain = topology.addChain("{}-C12".format(topology.getNumChains() + 1))
residue = topology.addResidue("C12", chain)
prev_atom = topology.addAtom("C", carbon_element, residue)
if self.forceField_str == "TraPPE-UA":
for i in range(11):
curr_atom = topology.addAtom("C{}".format(i + 1), carbon_element, residue)
topology.addBond(prev_atom, curr_atom)
prev_atom = curr_atom
else:
H_counter = 0
for _ in range(3):
H = topology.addAtom("H{}".format(H_counter), hydrogen_element, residue)
topology.addBond(H, prev_atom)
for i in range(11):
curr_atom = topology.addAtom("C{}".format(i + 1), carbon_element, residue)
topology.addBond(prev_atom, curr_atom)
for _ in range(2):
H = topology.addAtom("H{}".format(H_counter), hydrogen_element, residue)
topology.addBond(H, curr_atom)
prev_atom = curr_atom
H = topology.addAtom("H{}".format(H_counter), hydrogen_element, residue)
topology.addBond(H, prev_atom)
return chain.id
def _add_squalane_to_topology(self, topology):
# Carbon element
carbon_element = Element.getBySymbol('C')
hydrogen_element = Element.getBySymbol('H')
chain = topology.addChain("{}-squalane".format(topology.getNumChains() + 1))
residue = topology.addResidue("squalane", chain)
prev_atom = None
if self.forceField_str == "TraPPE-UA":
atom_index = 0
for _ in range(3):
C1 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
if prev_atom is not None:
topology.addBond(prev_atom, C1)
C2 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(C1, C2)
C3 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(C2, C3)
C4 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(C2, C4)
C5 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(C4, C5)
prev_atom = C5
for _ in range(3):
C1 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(prev_atom, C1)
C2 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(C1, C2)
C3 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(C2, C3)
C4 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(C3, C4)
C5 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
atom_index += 1
topology.addBond(C3, C5)
prev_atom = C5
else:
raise NotImplementedError("OPLS-AA not implemented for squalane")
return chain.id
def create_system(self, nonbondedMethod=NoCutoff, nonbondedCutoff=1.0*nanometer,
constraints=None, rigidWater=True, implicitSolvent=None,
soluteDielectric=1.0, solventDielectric=78.5,
ewaldErrorTolerance=0.0005, removeCMMotion=True,
hydrogenMass=None):
self._create_dodecane_acrylate_topology()
return self.forceField.createSystem(self._topology, nonbondedMethod=nonbondedMethod,
nonbondedCutoff=nonbondedCutoff,
constraints=constraints, rigidWater=rigidWater,
implicitSolvent=implicitSolvent, soluteDielectric=soluteDielectric,
solventDielectric=solventDielectric, ewaldErrorTolerance=ewaldErrorTolerance,
removeCMMotion=removeCMMotion, hydrogenMass=hydrogenMass)
| 43.358824 | 131 | 0.592593 | from __future__ import absolute_import
__author__ = "Charles Li"
__version__ = "1.0"
import os
from ast import literal_eval
import numpy as np
from simtk.openmm.app import Element, ForceField, NoCutoff, Topology
from simtk.unit import nanometer
from ._options import _Options
from ._system_topology_chain import ChainOptions, BranchedChainOptions
class _TopologyOptions(_Options):
_SECTION_NAME = '_Topology'
def __init__(self, system_options):
super(_TopologyOptions, self).__init__()
self.system_options = system_options
self._topology = None
def _create_filepath(self, filepath):
directory = self.system_options.input_options.directory
return os.path.join(directory, filepath)
def topology(self):
return self._topology
def create_system(self, nonbondedMethod=NoCutoff, nonbondedCutoff=1.0*nanometer,
constraints=None, rigidWater=True, implicitSolvent=None,
soluteDielectric=1.0, solventDielectric=78.5,
ewaldErrorTolerance=0.0005, removeCMMotion=True,
hydrogenMass=None):
pass
class AmberTopologyOptions(_TopologyOptions):
_SECTION_NAME = 'AmberTopology'
def __init__(self, system_options):
super(AmberTopologyOptions, self).__init__(system_options)
raise NotImplementedError("'{}' is not supported yet.".format(self._SECTION_NAME))
class GromacsTopologyOptions(_TopologyOptions):
    """Topology options backed by a GROMACS .top/.gro file pair (via ParmEd)."""

    _SECTION_NAME = 'GromacsTopology'

    def __init__(self, system_options):
        super(GromacsTopologyOptions, self).__init__(system_options)
        # Paths to the GROMACS topology (.top) and coordinate (.gro) files;
        # both are required (see _check_for_incomplete_input).
        self.topFilename = None
        self.groFilename = None
        # Cached parmed GromacsTopologyFile; built lazily.
        self._gromacs_topology = None

    def _create_options(self):
        """Register the option keywords handled by this section."""
        super(GromacsTopologyOptions, self)._create_options()
        self._OPTIONS['topFilename'] = self._parse_top_filename
        self._OPTIONS['groFilename'] = self._parse_gro_filename

    def _check_for_incomplete_input(self):
        """Report (via _incomplete_error) any required filename left unset."""
        if self.topFilename is None:
            self._incomplete_error('topFilename')
        if self.groFilename is None:
            self._incomplete_error('groFilename')

    def _parse_top_filename(self, *args):
        # Resolve the path relative to the input directory.
        self.topFilename = self._create_filepath(args[0])

    def _parse_gro_filename(self, *args):
        self.groFilename = self._create_filepath(args[0])

    def topology(self):
        """Return the OpenMM Topology parsed from the GROMACS files."""
        self._create_gromacs_topology()
        return self._gromacs_topology.topology

    def _create_gromacs_topology(self):
        """Parse and cache the GROMACS topology (parmed imported lazily)."""
        from parmed import gromacs
        if self._gromacs_topology is None:
            # Box vectors come from the .gro file, not the .top file.
            gro = gromacs.GromacsGroFile.parse(self.groFilename)
            self._gromacs_topology = gromacs.GromacsTopologyFile(self.topFilename)
            self._gromacs_topology.box = gro.box

    def create_system(self, nonbondedMethod=NoCutoff, nonbondedCutoff=1.0*nanometer,
                      constraints=None, rigidWater=True, implicitSolvent=None,
                      soluteDielectric=1.0, solventDielectric=78.5,
                      ewaldErrorTolerance=0.0005, removeCMMotion=True,
                      hydrogenMass=None):
        """Build an OpenMM System from the parsed GROMACS topology.

        All keyword arguments are forwarded to parmed's ``createSystem``.
        """
        self._create_gromacs_topology()
        return self._gromacs_topology.createSystem(nonbondedMethod=nonbondedMethod,
            nonbondedCutoff=nonbondedCutoff,
            constraints=constraints, rigidWater=rigidWater, implicitSolvent=implicitSolvent,
            soluteDielectric=soluteDielectric, solventDielectric=solventDielectric,
            ewaldErrorTolerance=ewaldErrorTolerance, removeCMMotion=removeCMMotion,
            hydrogenMass=hydrogenMass)
class DodecaneAcrylateTopologyOptions(_TopologyOptions):
    """Builds a dodecane/squalane/acrylate-chain topology programmatically.

    Unlike the Amber/GROMACS variants, this section constructs the OpenMM
    ``Topology`` atom-by-atom and pairs it with a bundled force-field XML
    file (TraPPE-UA by default, optionally OPLS-AA).
    """

    _SECTION_NAME = "DodecaneAcrylateTopology"

    # Bundled force-field XML files shipped next to this module.
    data_directory = os.path.join(os.path.dirname(__file__), 'data')
    TRAPPEUA_FF_PATH = os.path.join(data_directory, "trappeua-acrylates.xml")
    OPLS_AA_PATH = os.path.join(data_directory, "opls_aa.xml")

    def __init__(self, system_options):
        super(DodecaneAcrylateTopologyOptions, self).__init__(system_options)
        # Force-field selection; TraPPE-UA (united atom) is the default.
        self.forceField_str = "TraPPE-UA"
        self.forceField = ForceField(self.TRAPPEUA_FF_PATH)
        # Counts of solvent molecules to add to the topology.
        self.numDodecane = 0
        self.numSqualane = 0
        # NOTE(review): dodecaneInstructions is parsed but not used anywhere
        # in this class — possibly consumed by other options objects.
        self.dodecaneInstructions = None
        # Orthorhombic periodic box (nanometers), or None for no box.
        self.box_vectors = None
        # Chain sub-sections parsed from the input file.
        self.chains = []
        self.branched_chains = []
        # Maps topology chain ids to human-readable sequence strings.
        self.id_to_sequence = {}

    def _create_options(self):
        """Register the option keywords handled by this section."""
        super(DodecaneAcrylateTopologyOptions, self)._create_options()
        self._OPTIONS['forceField'] = self._parse_force_field
        self._OPTIONS['numDodecane'] = self._parse_num_dodecane
        self._OPTIONS['numSqualane'] = self._parse_num_squalane
        self._OPTIONS['dodecaneInstructions'] = self._parse_dodecane_instructions
        self._OPTIONS['box'] = self._parse_box

    def _create_sections(self):
        """Register the nested sub-sections handled by this section."""
        super(DodecaneAcrylateTopologyOptions, self)._create_sections()
        self._SECTIONS['chain'] = self._parse_chain
        self._SECTIONS['BranchedChain'] = self._parse_branched_chain

    def _parse_force_field(self, *args):
        """Select the force field: 'TraPPE-UA' or 'OPLS-AA'."""
        if args[0] == 'TraPPE-UA':
            self.forceField = ForceField(self.TRAPPEUA_FF_PATH)
        elif args[0] == 'OPLS-AA':
            self.forceField = ForceField(self.OPLS_AA_PATH)
        else:
            raise ValueError("Invalid force field.")
        self.forceField_str = args[0]

    def _parse_num_dodecane(self, *args):
        self.numDodecane = literal_eval(args[0])

    def _parse_num_squalane(self, *args):
        self.numSqualane = literal_eval(args[0])

    def _parse_dodecane_instructions(self, *args):
        # Instructions are '/'-separated tokens; surrounding whitespace is dropped.
        self.dodecaneInstructions = [instruction.strip() for instruction in args[0].split('/')]

    def _parse_box(self, *args):
        """Parse 'a b c' box edge lengths (nm) into diagonal box vectors."""
        # split() (rather than split(' ')) tolerates repeated whitespace
        # between the three numbers; single-space input parses as before.
        a, b, c = args[0].split()
        self.box_vectors = np.array([[literal_eval(a), 0.0, 0.0],
                                     [0.0, literal_eval(b), 0.0],
                                     [0.0, 0.0, literal_eval(c)]])*nanometer

    def _parse_chain(self, *args):
        """Parse one 'chain' sub-section into a ChainOptions object."""
        line_deque = args[1]
        chain_options = ChainOptions(self)
        chain_options.parse(line_deque.popleft())
        self.chains.append(chain_options)

    def _parse_branched_chain(self, *args):
        """Parse one 'BranchedChain' sub-section."""
        line_deque = args[1]
        branched_chain_options = BranchedChainOptions(self)
        branched_chain_options.parse(line_deque.popleft())
        self.branched_chains.append(branched_chain_options)

    def topology(self):
        """Return the lazily built OpenMM Topology."""
        self._create_dodecane_acrylate_topology()
        return self._topology

    def _create_dodecane_acrylate_topology(self):
        """Build and cache the Topology: chains first, then solvent molecules."""
        if self._topology is None:
            topology = Topology()
            if self.box_vectors is not None:
                topology.setPeriodicBoxVectors(self.box_vectors)
            for chain_option in self.chains:
                id_to_sequence = chain_option.add_chain_to_topology(topology)
                self.id_to_sequence.update(id_to_sequence)
            for branched_chain_option in self.branched_chains:
                branched_chain_option.add_chain_to_topology(topology)
            for _ in range(self.numDodecane):
                dodecane_id = self._add_dodecane_to_topology(topology)
                self.id_to_sequence[dodecane_id] = "C12"
            for _ in range(self.numSqualane):
                squalane_id = self._add_squalane_to_topology(topology)
                self.id_to_sequence[squalane_id] = "squalane"
            self._topology = topology

    def _add_dodecane_to_topology(self, topology):
        """Append one n-dodecane (C12) molecule and return the new chain id.

        TraPPE-UA uses one united-atom pseudo-atom per CHx group, so only
        the 12-carbon backbone is added; OPLS-AA adds explicit hydrogens.
        """
        carbon_element = Element.getBySymbol('C')
        hydrogen_element = Element.getBySymbol('H')
        chain = topology.addChain("{}-C12".format(topology.getNumChains() + 1))
        residue = topology.addResidue("C12", chain)
        prev_atom = topology.addAtom("C", carbon_element, residue)
        if self.forceField_str == "TraPPE-UA":
            for i in range(11):
                curr_atom = topology.addAtom("C{}".format(i + 1), carbon_element, residue)
                topology.addBond(prev_atom, curr_atom)
                prev_atom = curr_atom
        else:
            # All-atom (OPLS-AA): the terminal CH3 groups carry 3 hydrogens,
            # interior CH2 groups carry 2.
            # BUG FIX: H_counter was never incremented, so every hydrogen was
            # named "H0"; the 26 hydrogens now get unique names H0..H25.
            H_counter = 0
            for _ in range(3):
                H = topology.addAtom("H{}".format(H_counter), hydrogen_element, residue)
                H_counter += 1
                topology.addBond(H, prev_atom)
            for i in range(11):
                curr_atom = topology.addAtom("C{}".format(i + 1), carbon_element, residue)
                topology.addBond(prev_atom, curr_atom)
                for _ in range(2):
                    H = topology.addAtom("H{}".format(H_counter), hydrogen_element, residue)
                    H_counter += 1
                    topology.addBond(H, curr_atom)
                prev_atom = curr_atom
            H = topology.addAtom("H{}".format(H_counter), hydrogen_element, residue)
            topology.addBond(H, prev_atom)
        return chain.id

    def _add_squalane_to_topology(self, topology):
        """Append one squalane molecule (TraPPE-UA only) and return the chain id.

        The backbone is built from six 5-carbon units, each carrying one
        methyl branch, with unit connectivity mirrored between the two
        halves of the molecule.
        """
        carbon_element = Element.getBySymbol('C')
        hydrogen_element = Element.getBySymbol('H')
        chain = topology.addChain("{}-squalane".format(topology.getNumChains() + 1))
        residue = topology.addResidue("squalane", chain)
        prev_atom = None
        if self.forceField_str == "TraPPE-UA":
            atom_index = 0
            # First half: the branch (C3) hangs off the second backbone carbon.
            for _ in range(3):
                C1 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                if prev_atom is not None:
                    topology.addBond(prev_atom, C1)
                C2 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(C1, C2)
                C3 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(C2, C3)
                C4 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(C2, C4)
                C5 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(C4, C5)
                prev_atom = C5
            # Second half: mirrored — the branch (C4) hangs off the third carbon.
            for _ in range(3):
                C1 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(prev_atom, C1)
                C2 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(C1, C2)
                C3 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(C2, C3)
                C4 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(C3, C4)
                C5 = topology.addAtom("C{}".format(atom_index), carbon_element, residue)
                atom_index += 1
                topology.addBond(C3, C5)
                prev_atom = C5
        else:
            raise NotImplementedError("OPLS-AA not implemented for squalane")
        return chain.id

    def create_system(self, nonbondedMethod=NoCutoff, nonbondedCutoff=1.0*nanometer,
                      constraints=None, rigidWater=True, implicitSolvent=None,
                      soluteDielectric=1.0, solventDielectric=78.5,
                      ewaldErrorTolerance=0.0005, removeCMMotion=True,
                      hydrogenMass=None):
        """Build an OpenMM System for the constructed topology.

        All keyword arguments are forwarded to ``ForceField.createSystem``.
        """
        self._create_dodecane_acrylate_topology()
        return self.forceField.createSystem(self._topology, nonbondedMethod=nonbondedMethod,
            nonbondedCutoff=nonbondedCutoff,
            constraints=constraints, rigidWater=rigidWater,
            implicitSolvent=implicitSolvent, soluteDielectric=soluteDielectric,
            solventDielectric=solventDielectric, ewaldErrorTolerance=ewaldErrorTolerance,
            removeCMMotion=removeCMMotion, hydrogenMass=hydrogenMass)
| true | true |
1c396ef7bd19ebc1898c3ce8edba54d4d9b77dfc | 424 | py | Python | tests/src/year2017/test_day09b.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | 10 | 2017-12-11T17:54:52.000Z | 2021-12-09T20:16:30.000Z | tests/src/year2017/test_day09b.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | 260 | 2015-12-09T11:03:03.000Z | 2021-12-12T14:32:23.000Z | tests/src/year2017/test_day09b.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | null | null | null | """2017 - Day 9 Part 2: Stream Processing tests."""
import pytest
from src.year2017.day09b import solve
@pytest.mark.parametrize(
    ("stream", "expected"),
    [
        ("<>", 0),
        ("<random characters>", 17),
        ("<<<<>", 3),
        ("<{!>}>", 2),
        ("<!!>", 0),
        ("<!!!>>", 0),
        ('<{o"i!a,<{i<a>', 10),
    ],
)
def test_solve(stream, expected):
    """Each garbage stream yields the expected non-canceled character count."""
    assert solve(stream) == expected
| 20.190476 | 51 | 0.459906 | import pytest
from src.year2017.day09b import solve
@pytest.mark.parametrize(
    ("stream", "expected"),
    [
        ("<>", 0),
        ("<random characters>", 17),
        ("<<<<>", 3),
        ("<{!>}>", 2),
        ("<!!>", 0),
        ("<!!!>>", 0),
        ('<{o"i!a,<{i<a>', 10),
    ],
)
def test_solve(stream, expected):
    """Each garbage stream yields the expected non-canceled character count."""
    assert solve(stream) == expected
| true | true |
1c396efc3cf4df445e482ec254bd83587ce2d238 | 519 | py | Python | backend/backend/asgi.py | Dokeey/flexchat | 1e62f8ea054bd526cd5bf1c0db12f986b12ed559 | [
"MIT"
] | 1 | 2020-09-30T10:15:05.000Z | 2020-09-30T10:15:05.000Z | backend/backend/asgi.py | Dokeey/flexchat | 1e62f8ea054bd526cd5bf1c0db12f986b12ed559 | [
"MIT"
] | 7 | 2020-08-31T09:56:26.000Z | 2021-03-01T14:53:00.000Z | backend/backend/asgi.py | Dokeey/flexchat | 1e62f8ea054bd526cd5bf1c0db12f986b12ed559 | [
"MIT"
] | null | null | null | """
ASGI config for backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
import django
from channels.routing import get_default_application
from django.core.asgi import get_asgi_application
# Point Django at the production settings before initializing the framework.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings.prod')
django.setup()
# The plain Django ASGI app below is intentionally disabled in favour of
# channels' default routing application — presumably so websocket consumers
# are served too; confirm against the project's routing configuration.
# application = get_asgi_application()
application = get_default_application() | 25.95 | 78 | 0.801541 |
import os
import django
from channels.routing import get_default_application
from django.core.asgi import get_asgi_application
# Production settings must be configured before django.setup() initializes apps.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings.prod')
django.setup()
application = get_default_application() | true | true |
1c396f24dcb59ef5b6b1524ecddeb41ddb5c93bd | 172 | py | Python | api/urls.py | MECKEM-COV-19/backend | c0686f32f98b3acd5dc028d8a054089694654a07 | [
"MIT"
] | null | null | null | api/urls.py | MECKEM-COV-19/backend | c0686f32f98b3acd5dc028d8a054089694654a07 | [
"MIT"
] | 4 | 2020-03-22T12:27:45.000Z | 2021-06-10T22:44:00.000Z | api/urls.py | MECKEM-COV-19/backend | c0686f32f98b3acd5dc028d8a054089694654a07 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from graphene_django.views import GraphQLView
urlpatterns = [
path('graphql/', GraphQLView.as_view(graphiql=True)),
] | 21.5 | 57 | 0.767442 | from django.urls import path
from . import views
from graphene_django.views import GraphQLView
urlpatterns = [
path('graphql/', GraphQLView.as_view(graphiql=True)),
] | true | true |
1c396fe42c1941a4fdbd1abdeeb2b7d8bceff0c2 | 14,588 | py | Python | keras_applications/inception_v3_nadee.py | nadee13/keras_applications | 65ca5b17c71b3b3f0191576c03353cc26af9c289 | [
"MIT"
] | null | null | null | keras_applications/inception_v3_nadee.py | nadee13/keras_applications | 65ca5b17c71b3b3f0191576c03353cc26af9c289 | [
"MIT"
] | null | null | null | keras_applications/inception_v3_nadee.py | nadee13/keras_applications | 65ca5b17c71b3b3f0191576c03353cc26af9c289 | [
"MIT"
] | null | null | null | """Inception V3 model for Keras.
Note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function is also different (same as Xception).
# Reference
- [Rethinking the Inception Architecture for Computer Vision](
http://arxiv.org/abs/1512.00567) (CVPR 2016)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
# Release URLs for the pretrained ImageNet weights, with and without the
# fully-connected classification top.
WEIGHTS_PATH = (
    'https://github.com/fchollet/deep-learning-models/'
    'releases/download/v0.5/'
    'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = (
    'https://github.com/fchollet/deep-learning-models/'
    'releases/download/v0.5/'
    'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')

# Keras submodules; populated by get_submodules_from_kwargs() inside
# InceptionV3() before any layers are built.
backend = None
layers = None
models = None
keras_utils = None
def conv2d_bn(x,
              filters,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              name=None):
    """Convolution + batch normalization + ReLU block.

    # Arguments
        x: input tensor.
        filters: number of convolution filters.
        num_row: kernel height.
        num_col: kernel width.
        padding: padding mode passed to `Conv2D`.
        strides: strides passed to `Conv2D`.
        name: base name for the ops; the convolution becomes
            `name + '_conv'`, the batch norm `name + '_bn'`, and the
            activation takes `name` itself.

    # Returns
        Output tensor after `Conv2D`, `BatchNormalization` and ReLU.
    """
    conv_name = None if name is None else name + '_conv'
    bn_name = None if name is None else name + '_bn'
    # The channels axis depends on the backend image data format.
    bn_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    out = layers.Conv2D(filters,
                        (num_row, num_col),
                        strides=strides,
                        padding=padding,
                        use_bias=False,
                        name=conv_name)(x)
    out = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(out)
    return layers.Activation('relu', name=name)(out)
def InceptionV3(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000,
                **kwargs):
    """Instantiates the Inception v3 architecture.

    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)` (with `channels_last` data format)
            or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 75.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    global backend, layers, models, keras_utils
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=75,
        data_format=backend.image_data_format(),
        require_flatten=include_top,
        weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if backend.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3

    # Stem: strided/valid convolutions and max-pools downsample the input
    # before the first inception ("mixed") block.
    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
    x = conv2d_bn(x, 32, 3, 3, padding='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, padding='valid')
    x = conv2d_bn(x, 192, 3, 3, padding='valid')
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed0')

    # mixed 1: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed1')

    # mixed 2: 35 x 35 x 288
    branch1x1 = conv2d_bn(x, 64, 1, 1)

    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed2')

    # mixed 3: 17 x 17 x 768
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(
        branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate(
        [branch3x3, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed3')

    # mixed 4: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name='mixed4')

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = layers.AveragePooling2D(
            (3, 3), strides=(1, 1), padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(5 + i))

    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = layers.AveragePooling2D((3, 3),
                                          strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name='mixed7')

    # mixed 8: 8 x 8 x 1280
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
                          strides=(2, 2), padding='valid')

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(
        branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')

    branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = layers.concatenate(
        [branch3x3, branch7x7x3, branch_pool],
        axis=channel_axis,
        name='mixed8')

    # mixed 9: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)

        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate(
            [branch3x3_1, branch3x3_2],
            axis=channel_axis,
            name='mixed9_' + str(i))

        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate(
            [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)

        branch_pool = layers.AveragePooling2D(
            (3, 3), strides=(1, 1), padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(9 + i))
    if include_top:
        # Classification block
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        # NOTE(review): the stock Keras top is Dense(classes,
        # activation='softmax', name='predictions'); this variant outputs raw
        # logits from a layer named 'logits'. Confirm this is intended, and
        # that loading the pretrained ImageNet weights into the renamed,
        # linear top behaves as expected.
        x = layers.Dense(classes, activation=None, name='logits')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = models.Model(inputs, x, name='inception_v3')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            weights_path = keras_utils.get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
        else:
            weights_path = keras_utils.get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='bcbd6486424b2319ff4ef7d526e38f63')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
def preprocess_input(x, **kwargs):
    """Preprocesses a numpy array encoding a batch of images.

    Uses the 'tf' preprocessing mode (the same preprocessing as Xception,
    as noted in the module docstring).

    # Arguments
        x: a 4D numpy array of RGB values within [0, 255].

    # Returns
        Preprocessed array.
    """
    return imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
| 35.754902 | 80 | 0.603304 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from . import get_submodules_from_kwargs
from . import imagenet_utils
from .imagenet_utils import decode_predictions
from .imagenet_utils import _obtain_input_shape
WEIGHTS_PATH = (
'https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.5/'
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
WEIGHTS_PATH_NO_TOP = (
'https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.5/'
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5')
backend = None
layers = None
models = None
keras_utils = None
def conv2d_bn(x,
              filters,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              name=None):
    """Apply Conv2D + BatchNormalization + ReLU; returns the output tensor.

    When *name* is given, the convolution is named `name + '_conv'`, the
    batch norm `name + '_bn'`, and the activation takes `name` itself.
    """
    if name is not None:
        bn_name = name + '_bn'
        conv_name = name + '_conv'
    else:
        bn_name = None
        conv_name = None
    # The channels axis depends on the backend image data format.
    if backend.image_data_format() == 'channels_first':
        bn_axis = 1
    else:
        bn_axis = 3
    x = layers.Conv2D(
        filters, (num_row, num_col),
        strides=strides,
        padding=padding,
        use_bias=False,
        name=conv_name)(x)
    x = layers.BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    x = layers.Activation('relu', name=name)(x)
    return x
def InceptionV3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=75,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if backend.image_data_format() == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='valid')
x = conv2d_bn(x, 32, 3, 3, padding='valid')
x = conv2d_bn(x, 64, 3, 3)
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv2d_bn(x, 80, 1, 1, padding='valid')
x = conv2d_bn(x, 192, 3, 3, padding='valid')
x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed0')
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed1')
branch1x1 = conv2d_bn(x, 64, 1, 1)
branch5x5 = conv2d_bn(x, 48, 1, 1)
branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
x = layers.concatenate(
[branch1x1, branch5x5, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed2')
branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='valid')
branch3x3dbl = conv2d_bn(x, 64, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
branch3x3dbl = conv2d_bn(
branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='valid')
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed3')
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 128, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 128, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed4')
for i in range(2):
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 160, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 160, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(5 + i))
branch1x1 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(x, 192, 1, 1)
branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
branch7x7dbl = conv2d_bn(x, 192, 1, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
branch_pool = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch7x7, branch7x7dbl, branch_pool],
axis=channel_axis,
name='mixed7')
branch3x3 = conv2d_bn(x, 192, 1, 1)
branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
strides=(2, 2), padding='valid')
branch7x7x3 = conv2d_bn(x, 192, 1, 1)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
branch7x7x3 = conv2d_bn(
branch7x7x3, 192, 3, 3, strides=(2, 2), padding='valid')
branch_pool = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
x = layers.concatenate(
[branch3x3, branch7x7x3, branch_pool],
axis=channel_axis,
name='mixed8')
for i in range(2):
branch1x1 = conv2d_bn(x, 320, 1, 1)
branch3x3 = conv2d_bn(x, 384, 1, 1)
branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
branch3x3 = layers.concatenate(
[branch3x3_1, branch3x3_2],
axis=channel_axis,
name='mixed9_' + str(i))
branch3x3dbl = conv2d_bn(x, 448, 1, 1)
branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
branch3x3dbl = layers.concatenate(
[branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
branch_pool = layers.AveragePooling2D(
(3, 3), strides=(1, 1), padding='same')(x)
branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
x = layers.concatenate(
[branch1x1, branch3x3, branch3x3dbl, branch_pool],
axis=channel_axis,
name='mixed' + str(9 + i))
if include_top:
x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = layers.Dense(classes, activation=None, name='logits')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
if input_tensor is not None:
inputs = keras_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
model = models.Model(inputs, x, name='inception_v3')
if weights == 'imagenet':
if include_top:
weights_path = keras_utils.get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
else:
weights_path = keras_utils.get_file(
'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='bcbd6486424b2319ff4ef7d526e38f63')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def preprocess_input(x, **kwargs):
    """Preprocess a batch of images for Inception V3.

    Delegates to ``imagenet_utils.preprocess_input`` in ``'tf'`` mode
    (which scales pixel values into the [-1, 1] range); extra keyword
    arguments are forwarded unchanged.
    """
    preprocessed = imagenet_utils.preprocess_input(x, mode='tf', **kwargs)
    return preprocessed
| true | true |
1c3972136fc6ff85b47ef462a1a1317f6bf42fed | 717 | py | Python | setup.py | CIIRC-ISI/PyAutomationML | 19272934dfbb15a2665b8c55058f94b0e55d2879 | [
"BSD-3-Clause"
] | null | null | null | setup.py | CIIRC-ISI/PyAutomationML | 19272934dfbb15a2665b8c55058f94b0e55d2879 | [
"BSD-3-Clause"
] | null | null | null | setup.py | CIIRC-ISI/PyAutomationML | 19272934dfbb15a2665b8c55058f94b0e55d2879 | [
"BSD-3-Clause"
] | null | null | null | import pathlib
from setuptools import setup
# Directory containing this setup script; the long description is read
# from the adjacent README.md.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
# Package metadata for PyPI / pip installs.
setup(
    name='pyautomationml',
    description='Library for processing AutomationML files in python',
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/CIIRC-ISI/PyAML",
    author="CIIRC ISI Microteam",
    author_email="doudape1@fel.cvut.cz",
    version='1.1.0',
    packages=['pyautomationml'],
    license="BSD",
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8"],
    # lxml is the only runtime dependency (XML parsing).
    install_requires=[
        'lxml',
    ],
)
| 27.576923 | 70 | 0.658298 | import pathlib
from setuptools import setup
# NOTE(review): this span duplicates the setup script above (dataset
# extraction artifact — the "content_no_comment" copy of the same file).
# Directory containing this setup script; the long description is read
# from the adjacent README.md.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
    name='pyautomationml',
    description='Library for processing AutomationML files in python',
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/CIIRC-ISI/PyAML",
    author="CIIRC ISI Microteam",
    author_email="doudape1@fel.cvut.cz",
    version='1.1.0',
    packages=['pyautomationml'],
    license="BSD",
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8"],
    install_requires=[
        'lxml',
    ],
)
| true | true |
1c3972452c8a9c15f17df7204dfd01c76b40005d | 1,344 | py | Python | to_images.py | qhjqhj00/ConV2020 | 680c4b8eb9e9568471e414f6e763e838bca0025e | [
"Apache-2.0"
] | null | null | null | to_images.py | qhjqhj00/ConV2020 | 680c4b8eb9e9568471e414f6e763e838bca0025e | [
"Apache-2.0"
] | null | null | null | to_images.py | qhjqhj00/ConV2020 | 680c4b8eb9e9568471e414f6e763e838bca0025e | [
"Apache-2.0"
] | null | null | null | import matplotlib
# Use the non-interactive Agg backend so figures can be written to disk
# on a headless machine (must run before pyplot is imported).
matplotlib.use('Agg')
import pandas as pd
from matplotlib import pyplot as plt
import os
from pypinyin import lazy_pinyin
from matplotlib.ticker import MaxNLocator
import re
# Input directory with the scraped statistics tables.
target_dir = './res/'
# Output directory for the rendered PNG plots.
picture = './images/'
# Invert a dict: {k: v, ...} -> {v: k, ...} (duplicate values collapse).
func = lambda z:dict([(x, y) for y, x in z.items()])
def plot(x, y, t, a):
    """Render a time series and save it as ``./images/<a>/<t>.png``.

    Parameters
    ----------
    x : sequence
        Tick labels for the time axis (``month.day.hour`` strings).
    y : sequence
        Values corresponding to ``x``.
    t : str
        Statistic name (used as the y-label and the file name).
    a : str
        Region name in pinyin (used as the title and sub-directory name).
    """
    ax = plt.figure().gca()
    # Counts are integers; force integer ticks on the y axis.
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    plt.title(a)
    plt.xlabel("time (month.day.hour)")
    plt.ylabel(t)
    plt.plot(x, y)
    out_dir = picture + f'{a}/'
    # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the old
    # code and also creates the parent './images/' directory when missing
    # (mkdir() would raise FileNotFoundError in that case).
    os.makedirs(out_dir, exist_ok=True)
    plt.savefig(out_dir + f'{t}.png')
    plt.close()
def time(t):
    """Convert a ``YYYYMMDDhhmm`` timestamp string to ``month.day.hour``.

    Only the month/day/hour fields are used; the year and minutes were
    computed by the original code but never read, so they are dropped.
    (The name shadows the stdlib ``time`` module, but that module is not
    imported in this script, so the collision is harmless here.)
    """
    month = t[4:6]
    day = t[6:8]
    hour = t[8:10]
    return f'{month}.{day}.{hour}'
def to_images(data, t):
    """Plot the time series of statistic ``t`` for every column of ``data``.

    Each column is a region; its series is a {timestamp: count} mapping
    which is inverted (via ``func``) so counts become keys.  A leading
    (``''``, 0) point anchors every curve at the origin.
    """
    for region in data:
        inverted = func(data[region].to_dict())
        counts = [0] + list(inverted.keys())
        stamps = [''] + [time(ts) for ts in inverted.values()]
        name = ''.join(lazy_pinyin(region))
        plot(stamps, counts, t, name)
# Regex alternation matching any tracked statistic in a file name.
fields = '|'.join(['confirmedCount', 'deadCount', 'curedCount'])
# Only the files directly inside ./res/ are scanned (first os.walk entry).
for table in list(os.walk(target_dir))[0][2]:
    t = re.findall(fields, table)
    if len(t) > 0:
        # Tables are tab-separated; transpose so regions become columns.
        data = pd.read_csv(target_dir+table, '\t',header=0,index_col=0).T
        to_images(data, t[0])
| 24.436364 | 73 | 0.570685 | import matplotlib
# NOTE(review): this whole span is a byte-for-byte duplicate of the script
# above (dataset extraction artifact — the "content_no_comment" copy).
# Select the headless Agg backend before pyplot is used.
matplotlib.use('Agg')
import pandas as pd
from matplotlib import pyplot as plt
import os
from pypinyin import lazy_pinyin
from matplotlib.ticker import MaxNLocator
import re
target_dir = './res/'
picture = './images/'
# Invert a dict: {k: v, ...} -> {v: k, ...}.
func = lambda z:dict([(x, y) for y, x in z.items()])
# Render one time series and save it under ./images/<a>/<t>.png.
def plot(x,y,t,a):
    ax = plt.figure().gca()
    ax.yaxis.set_major_locator(MaxNLocator(integer=True))
    plt.title(a)
    plt.xlabel("time (month.day.hour)")
    plt.ylabel(t)
    plt.plot(x,y)
    if not os.path.exists(picture+f'{a}/'):
        os.mkdir(picture+f'{a}/')
    plt.savefig(picture+f'{a}/{t}.png')
    plt.close()
# Convert a YYYYMMDDhhmm timestamp to 'month.day.hour'.
def time(t):
    year = t[:4]
    month = t[4:6]
    day = t[6:8]
    hour = t[8:10]
    minute = t[10:]
    return f'{month}.{day}.{hour}'
# Plot statistic t for every region (column) in the transposed table.
def to_images(data, t):
    for l in data:
        d = data[l].to_dict()
        d = func(d)
        y = list(d.keys())
        x = list(d.values())
        x = [time(t) for t in x]
        x.insert(0,'')
        y.insert(0,0)
        a = ''.join(lazy_pinyin(l))
        plot(x,y,t,a)
# Scan ./res/ for per-statistic tables and plot each one found.
fields = '|'.join(['confirmedCount', 'deadCount', 'curedCount'])
for table in list(os.walk(target_dir))[0][2]:
    t = re.findall(fields, table)
    if len(t) > 0:
        data = pd.read_csv(target_dir+table, '\t',header=0,index_col=0).T
        to_images(data, t[0])
| true | true |
1c39726e349a0005641f4c91d20c9491a42311a3 | 62,089 | py | Python | nipype/interfaces/freesurfer/model.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/freesurfer/model.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/freesurfer/model.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The freesurfer module provides basic functions for interfacing with
freesurfer tools.
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os
from ...utils.filemanip import fname_presuffix, split_filename
from ..base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath,
Directory, isdefined)
from .base import FSCommand, FSTraitedSpec
from .utils import copy2subjdir
__docformat__ = 'restructuredtext'
class MRISPreprocInputSpec(FSTraitedSpec):
    """Command-line options for ``mris_preproc``.

    Each trait's ``argstr`` is the flag passed to the binary, ``xor``
    tuples mark mutually exclusive options, and ``desc`` strings carry
    the per-option help text.
    """
    out_file = File(argstr='--out %s', genfile=True, desc='output filename')
    target = traits.Str(
        argstr='--target %s', mandatory=True, desc='target subject name')
    hemi = traits.Enum(
        'lh',
        'rh',
        argstr='--hemi %s',
        mandatory=True,
        desc='hemisphere for source and target')
    surf_measure = traits.Str(
        argstr='--meas %s',
        xor=('surf_measure', 'surf_measure_file', 'surf_area'),
        desc='Use subject/surf/hemi.surf_measure as input')
    surf_area = traits.Str(
        argstr='--area %s',
        xor=('surf_measure', 'surf_measure_file', 'surf_area'),
        desc=
        'Extract vertex area from subject/surf/hemi.surfname to use as input.')
    subjects = traits.List(
        argstr='--s %s...',
        xor=('subjects', 'fsgd_file', 'subject_file'),
        desc='subjects from who measures are calculated')
    fsgd_file = File(
        exists=True,
        argstr='--fsgd %s',
        xor=('subjects', 'fsgd_file', 'subject_file'),
        desc='specify subjects using fsgd file')
    subject_file = File(
        exists=True,
        argstr='--f %s',
        xor=('subjects', 'fsgd_file', 'subject_file'),
        desc='file specifying subjects separated by white space')
    surf_measure_file = InputMultiPath(
        File(exists=True),
        argstr='--is %s...',
        xor=('surf_measure', 'surf_measure_file', 'surf_area'),
        desc='file alternative to surfmeas, still requires list of subjects')
    source_format = traits.Str(argstr='--srcfmt %s', desc='source format')
    surf_dir = traits.Str(
        argstr='--surfdir %s', desc='alternative directory (instead of surf)')
    vol_measure_file = InputMultiPath(
        traits.Tuple(File(exists=True), File(exists=True)),
        argstr='--iv %s %s...',
        desc='list of volume measure and reg file tuples')
    proj_frac = traits.Float(
        argstr='--projfrac %s', desc='projection fraction for vol2surf')
    fwhm = traits.Float(
        argstr='--fwhm %f',
        xor=['num_iters'],
        desc='smooth by fwhm mm on the target surface')
    num_iters = traits.Int(
        argstr='--niters %d',
        xor=['fwhm'],
        desc='niters : smooth by niters on the target surface')
    fwhm_source = traits.Float(
        argstr='--fwhm-src %f',
        xor=['num_iters_source'],
        desc='smooth by fwhm mm on the source surface')
    num_iters_source = traits.Int(
        argstr='--niterssrc %d',
        xor=['fwhm_source'],
        desc='niters : smooth by niters on the source surface')
    smooth_cortex_only = traits.Bool(
        argstr='--smooth-cortex-only',
        desc='only smooth cortex (ie, exclude medial wall)')
class MRISPreprocOutputSpec(TraitedSpec):
    """Outputs produced by ``mris_preproc``."""
    out_file = File(desc='preprocessed output file')
class MRISPreproc(FSCommand):
    """Prepare a group of contrasts for a second-level analysis with
    FreeSurfer's ``mris_preproc``.

    Examples
    --------
    >>> preproc = MRISPreproc()
    >>> preproc.inputs.target = 'fsaverage'
    >>> preproc.inputs.hemi = 'lh'
    >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \
    ('cont1a.nii', 'register.dat')]
    >>> preproc.inputs.out_file = 'concatenated_file.mgz'
    >>> preproc.cmdline
    'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat'
    """

    _cmd = 'mris_preproc'
    input_spec = MRISPreprocInputSpec
    output_spec = MRISPreprocOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if isdefined(self.inputs.out_file):
            outputs['out_file'] = self.inputs.out_file
        else:
            # Default output name encodes hemisphere and target subject.
            default_name = 'concat_%s_%s.mgz' % (self.inputs.hemi,
                                                 self.inputs.target)
            outputs['out_file'] = os.path.join(os.getcwd(), default_name)
        return outputs

    def _gen_filename(self, name):
        return self._list_outputs()[name] if name == 'out_file' else None
class MRISPreprocReconAllInputSpec(MRISPreprocInputSpec):
    """Inputs for :class:`MRISPreprocReconAll`.

    Extends the plain ``mris_preproc`` inputs with recon-all workflow
    conveniences: implicit surface-registration targets and the option to
    copy implicit inputs into the node directory.
    """
    surf_measure_file = File(
        exists=True,
        argstr='--meas %s',
        xor=('surf_measure', 'surf_measure_file', 'surf_area'),
        desc='file necessary for surfmeas')
    surfreg_files = InputMultiPath(
        File(exists=True),
        argstr="--surfreg %s",
        requires=['lh_surfreg_target', 'rh_surfreg_target'],
        desc="lh and rh input surface registration files")
    lh_surfreg_target = File(
        desc="Implicit target surface registration file",
        requires=['surfreg_files'])
    rh_surfreg_target = File(
        desc="Implicit target surface registration file",
        requires=['surfreg_files'])
    subject_id = traits.String(
        'subject_id',
        argstr='--s %s',
        usedefault=True,
        xor=('subjects', 'fsgd_file', 'subject_file', 'subject_id'),
        desc='subject from whom measures are calculated')
    copy_inputs = traits.Bool(
        desc="If running as a node, set this to True " +
        "this will copy some implicit inputs to the " + "node directory.")
class MRISPreprocReconAll(MRISPreproc):
    """Extends MRISPreproc to allow it to be used in a recon-all workflow

    Examples
    ========
    >>> preproc = MRISPreprocReconAll()
    >>> preproc.inputs.target = 'fsaverage'
    >>> preproc.inputs.hemi = 'lh'
    >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \
    ('cont1a.nii', 'register.dat')]
    >>> preproc.inputs.out_file = 'concatenated_file.mgz'
    >>> preproc.cmdline
    'mris_preproc --hemi lh --out concatenated_file.mgz --s subject_id --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat'
    """

    input_spec = MRISPreprocReconAllInputSpec

    @staticmethod
    def _strip_hemi(basename):
        """Drop one leading 'lh.' or 'rh.' hemisphere prefix, if present.

        The previous implementation used ``lstrip('rh.').lstrip('lh.')``,
        but ``str.lstrip`` removes a *set of characters*, not a prefix,
        so names whose stem starts with 'l', 'h', 'r' or '.' were
        mangled (e.g. 'lh.hippocampus' -> 'ippocampus').
        """
        if basename[:3] in ('lh.', 'rh.'):
            return basename[3:]
        return basename

    def run(self, **inputs):
        if self.inputs.copy_inputs:
            # Stage implicit inputs inside the node's working directory so
            # mris_preproc can find them under a local SUBJECTS_DIR.
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            if isdefined(self.inputs.surf_dir):
                folder = self.inputs.surf_dir
            else:
                folder = 'surf'
            if isdefined(self.inputs.surfreg_files):
                for surfreg in self.inputs.surfreg_files:
                    basename = os.path.basename(surfreg)
                    copy2subjdir(self, surfreg, folder, basename)
                    # Copy the matching implicit target registration file
                    # into the target subject's surf directory.
                    if basename.startswith('lh.'):
                        copy2subjdir(
                            self,
                            self.inputs.lh_surfreg_target,
                            folder,
                            basename,
                            subject_id=self.inputs.target)
                    else:
                        copy2subjdir(
                            self,
                            self.inputs.rh_surfreg_target,
                            folder,
                            basename,
                            subject_id=self.inputs.target)
            if isdefined(self.inputs.surf_measure_file):
                copy2subjdir(self, self.inputs.surf_measure_file, folder)
        return super(MRISPreprocReconAll, self).run(**inputs)

    def _format_arg(self, name, spec, value):
        # mris_preproc looks for these files in the surf dir; pass the
        # basename without its hemisphere prefix.
        if name == 'surfreg_files':
            basename = os.path.basename(value[0])
            return spec.argstr % self._strip_hemi(basename)
        if name == "surf_measure_file":
            basename = os.path.basename(value)
            return spec.argstr % self._strip_hemi(basename)
        return super(MRISPreprocReconAll, self)._format_arg(name, spec, value)
class GLMFitInputSpec(FSTraitedSpec):
    """Command-line options for ``mri_glmfit``.

    Each trait's ``argstr`` is the flag passed to the binary; ``xor``
    groups are mutually exclusive options.
    """
    glm_dir = traits.Str(
        argstr='--glmdir %s', desc='save outputs to dir', genfile=True)
    in_file = File(
        desc='input 4D file', argstr='--y %s', mandatory=True, copyfile=False)
    _design_xor = ('fsgd', 'design', 'one_sample')
    fsgd = traits.Tuple(
        File(exists=True),
        traits.Enum('doss', 'dods'),
        argstr='--fsgd %s %s',
        xor=_design_xor,
        desc='freesurfer descriptor file')
    design = File(
        exists=True,
        argstr='--X %s',
        xor=_design_xor,
        desc='design matrix file')
    contrast = InputMultiPath(
        File(exists=True), argstr='--C %s...', desc='contrast file')
    one_sample = traits.Bool(
        argstr='--osgm',
        xor=('one_sample', 'fsgd', 'design', 'contrast'),
        desc='construct X and C as a one-sample group mean')
    no_contrast_ok = traits.Bool(
        argstr='--no-contrasts-ok',
        desc='do not fail if no contrasts specified')
    per_voxel_reg = InputMultiPath(
        File(exists=True), argstr='--pvr %s...', desc='per-voxel regressors')
    self_reg = traits.Tuple(
        traits.Int,
        traits.Int,
        traits.Int,
        argstr='--selfreg %d %d %d',
        desc='self-regressor from index col row slice')
    weighted_ls = File(
        exists=True,
        argstr='--wls %s',
        xor=('weight_file', 'weight_inv', 'weight_sqrt'),
        desc='weighted least squares')
    fixed_fx_var = File(
        exists=True, argstr='--yffxvar %s', desc='for fixed effects analysis')
    fixed_fx_dof = traits.Int(
        argstr='--ffxdof %d',
        xor=['fixed_fx_dof_file'],
        desc='dof for fixed effects analysis')
    # Fixed: argstr used '%d' for a file path, which would raise a
    # formatting TypeError when the option was rendered.
    fixed_fx_dof_file = File(
        argstr='--ffxdofdat %s',
        xor=['fixed_fx_dof'],
        desc='text file with dof for fixed effects analysis')
    # NOTE(review): weight_file has no argstr, so it is never placed on the
    # command line — presumably it should map to an mri_glmfit weight flag;
    # left unchanged pending confirmation against the binary's usage.
    weight_file = File(
        exists=True,
        xor=['weighted_ls'],
        desc='weight for each input at each voxel')
    weight_inv = traits.Bool(
        argstr='--w-inv', desc='invert weights', xor=['weighted_ls'])
    weight_sqrt = traits.Bool(
        argstr='--w-sqrt', desc='sqrt of weights', xor=['weighted_ls'])
    fwhm = traits.Range(
        low=0.0, argstr='--fwhm %f', desc='smooth input by fwhm')
    var_fwhm = traits.Range(
        low=0.0, argstr='--var-fwhm %f', desc='smooth variance by fwhm')
    no_mask_smooth = traits.Bool(
        argstr='--no-mask-smooth', desc='do not mask when smoothing')
    no_est_fwhm = traits.Bool(
        argstr='--no-est-fwhm', desc='turn off FWHM output estimation')
    mask_file = File(exists=True, argstr='--mask %s', desc='binary mask')
    label_file = File(
        exists=True,
        argstr='--label %s',
        xor=['cortex'],
        desc='use label as mask, surfaces only')
    cortex = traits.Bool(
        argstr='--cortex',
        xor=['label_file'],
        desc='use subjects ?h.cortex.label as label')
    invert_mask = traits.Bool(argstr='--mask-inv', desc='invert mask')
    prune = traits.Bool(
        argstr='--prune',
        desc=
        'remove voxels that do not have a non-zero value at each frame (def)')
    # Fixed: the xor entries previously named non-existent traits
    # ('prunethresh'/'noprune'), so the mutual exclusion was never applied.
    no_prune = traits.Bool(
        argstr='--no-prune', xor=['prune_thresh'], desc='do not prune')
    prune_thresh = traits.Float(
        argstr='--prune_thr %f',
        xor=['no_prune'],
        desc='prune threshold. Default is FLT_MIN')
    compute_log_y = traits.Bool(
        argstr='--logy', desc='compute natural log of y prior to analysis')
    save_estimate = traits.Bool(
        argstr='--yhat-save', desc='save signal estimate (yhat)')
    save_residual = traits.Bool(
        argstr='--eres-save', desc='save residual error (eres)')
    save_res_corr_mtx = traits.Bool(
        argstr='--eres-scm',
        desc='save residual error spatial correlation matrix (eres.scm). Big!')
    surf = traits.Bool(
        argstr="--surf %s %s %s",
        requires=["subject_id", "hemi"],
        desc="analysis is on a surface mesh")
    subject_id = traits.Str(desc="subject id for surface geometry")
    hemi = traits.Enum("lh", "rh", desc="surface hemisphere")
    surf_geo = traits.Str(
        "white",
        usedefault=True,
        desc="surface geometry name (e.g. white, pial)")
    simulation = traits.Tuple(
        traits.Enum('perm', 'mc-full', 'mc-z'),
        traits.Int(min=1),
        traits.Float,
        traits.Str,
        argstr='--sim %s %d %f %s',
        desc='nulltype nsim thresh csdbasename')
    sim_sign = traits.Enum(
        'abs', 'pos', 'neg', argstr='--sim-sign %s', desc='abs, pos, or neg')
    uniform = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='--uniform %f %f',
        desc='use uniform distribution instead of gaussian')
    pca = traits.Bool(
        argstr='--pca', desc='perform pca/svd analysis on residual')
    calc_AR1 = traits.Bool(
        argstr='--tar1', desc='compute and save temporal AR1 of residual')
    save_cond = traits.Bool(
        argstr='--save-cond',
        desc='flag to save design matrix condition at each voxel')
    vox_dump = traits.Tuple(
        traits.Int,
        traits.Int,
        traits.Int,
        argstr='--voxdump %d %d %d',
        desc='dump voxel GLM and exit')
    seed = traits.Int(argstr='--seed %d', desc='used for synthesizing noise')
    synth = traits.Bool(argstr='--synth', desc='replace input with gaussian')
    resynth_test = traits.Int(
        argstr='--resynthtest %d', desc='test GLM by resynthsis')
    profile = traits.Int(argstr='--profile %d', desc='niters : test speed')
    force_perm = traits.Bool(
        argstr='--perm-force',
        desc='force perumtation test, even when design matrix is not orthog')
    # Fixed: the flag string was passed positionally (as the trait default)
    # instead of as argstr, so --diag was never emitted.
    diag = traits.Int(
        argstr='--diag %d', desc='Gdiag_no : set diagnositc level')
    diag_cluster = traits.Bool(
        argstr='--diag-cluster',
        desc='save sig volume and exit from first sim loop')
    debug = traits.Bool(argstr='--debug', desc='turn on debugging')
    check_opts = traits.Bool(
        argstr='--checkopts',
        desc="don't run anything, just check options and exit")
    allow_repeated_subjects = traits.Bool(
        argstr='--allowsubjrep',
        desc=
        'allow subject names to repeat in the fsgd file (must appear before --fsgd'
    )
    allow_ill_cond = traits.Bool(
        argstr='--illcond', desc='allow ill-conditioned design matrices')
    sim_done_file = File(
        argstr='--sim-done %s', desc='create file when simulation finished')
class GLMFitOutputSpec(TraitedSpec):
    """Outputs produced by ``mri_glmfit`` (paths inside ``glm_dir``)."""
    glm_dir = Directory(exists=True, desc="output directory")
    beta_file = File(exists=True, desc="map of regression coefficients")
    error_file = File(desc="map of residual error")
    error_var_file = File(desc="map of residual error variance")
    error_stddev_file = File(desc="map of residual error standard deviation")
    estimate_file = File(desc="map of the estimated Y values")
    mask_file = File(desc="map of the mask used in the analysis")
    fwhm_file = File(desc="text file with estimated smoothness")
    dof_file = File(
        desc="text file with effective degrees-of-freedom for the analysis")
    gamma_file = OutputMultiPath(
        desc="map of contrast of regression coefficients")
    gamma_var_file = OutputMultiPath(
        desc="map of regression contrast variance")
    sig_file = OutputMultiPath(desc="map of F-test significance (in -log10p)")
    ftest_file = OutputMultiPath(desc="map of test statistic values")
    spatial_eigenvectors = File(
        desc="map of spatial eigenvectors from residual PCA")
    frame_eigenvectors = File(
        desc="matrix of frame eigenvectors from residual PCA")
    singular_values = File(desc="matrix singular values from residual PCA")
    svd_stats_file = File(desc="text file summarizing the residual PCA")
class GLMFit(FSCommand):
    """Use FreeSurfer's mri_glmfit to specify and estimate a general linear model.

    Examples
    --------
    >>> glmfit = GLMFit()
    >>> glmfit.inputs.in_file = 'functional.nii'
    >>> glmfit.inputs.one_sample = True
    >>> glmfit.cmdline == 'mri_glmfit --glmdir %s --y functional.nii --osgm'%os.getcwd()
    True
    """

    _cmd = 'mri_glmfit'
    input_spec = GLMFitInputSpec
    output_spec = GLMFitOutputSpec

    def _format_arg(self, name, spec, value):
        if name == "surf":
            # --surf takes subject, hemisphere and geometry rather than
            # the boolean trait value itself.
            _si = self.inputs
            return spec.argstr % (_si.subject_id, _si.hemi, _si.surf_geo)
        return super(GLMFit, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        outputs = self.output_spec().get()

        # Get the top-level output directory
        if not isdefined(self.inputs.glm_dir):
            glmdir = os.getcwd()
        else:
            glmdir = os.path.abspath(self.inputs.glm_dir)
        outputs["glm_dir"] = glmdir

        # Assign the output files that always get created
        outputs["beta_file"] = os.path.join(glmdir, "beta.mgh")
        outputs["error_var_file"] = os.path.join(glmdir, "rvar.mgh")
        outputs["error_stddev_file"] = os.path.join(glmdir, "rstd.mgh")
        outputs["mask_file"] = os.path.join(glmdir, "mask.mgh")
        outputs["fwhm_file"] = os.path.join(glmdir, "fwhm.dat")
        outputs["dof_file"] = os.path.join(glmdir, "dof.dat")
        # Assign the conditional outputs
        if isdefined(self.inputs.save_residual) and self.inputs.save_residual:
            outputs["error_file"] = os.path.join(glmdir, "eres.mgh")
        if isdefined(self.inputs.save_estimate) and self.inputs.save_estimate:
            outputs["estimate_file"] = os.path.join(glmdir, "yhat.mgh")

        # Get the contrast directory name(s).  Default to an empty list so
        # that a run without any contrast (e.g. with no_contrast_ok) does
        # not raise a NameError in the list comprehensions below.
        contrasts = []
        if isdefined(self.inputs.contrast):
            for c in self.inputs.contrast:
                if split_filename(c)[2] in [".mat", ".dat", ".mtx", ".con"]:
                    contrasts.append(split_filename(c)[1])
                else:
                    contrasts.append(os.path.split(c)[1])
        elif isdefined(self.inputs.one_sample) and self.inputs.one_sample:
            contrasts = ["osgm"]

        # Add in the contrast images
        outputs["sig_file"] = [
            os.path.join(glmdir, c, "sig.mgh") for c in contrasts
        ]
        outputs["ftest_file"] = [
            os.path.join(glmdir, c, "F.mgh") for c in contrasts
        ]
        outputs["gamma_file"] = [
            os.path.join(glmdir, c, "gamma.mgh") for c in contrasts
        ]
        outputs["gamma_var_file"] = [
            os.path.join(glmdir, c, "gammavar.mgh") for c in contrasts
        ]

        # Add in the PCA results, if relevant
        if isdefined(self.inputs.pca) and self.inputs.pca:
            pcadir = os.path.join(glmdir, "pca-eres")
            outputs["spatial_eigenvectors"] = os.path.join(pcadir, "v.mgh")
            outputs["frame_eigenvectors"] = os.path.join(pcadir, "u.mtx")
            # Fixed key: was misspelled "singluar_values", so the declared
            # singular_values output trait was never populated.
            outputs["singular_values"] = os.path.join(pcadir, "sdiag.mat")
            outputs["svd_stats_file"] = os.path.join(pcadir, "stats.dat")
        return outputs

    def _gen_filename(self, name):
        if name == 'glm_dir':
            return os.getcwd()
        return None
class OneSampleTTest(GLMFit):
    """GLMFit preconfigured for a one-sample group mean (``--osgm``)."""
    def __init__(self, **kwargs):
        # Force the one-sample group-mean design; all other GLMFit
        # options remain available to the caller.
        super(OneSampleTTest, self).__init__(**kwargs)
        self.inputs.one_sample = True
class BinarizeInputSpec(FSTraitedSpec):
    """Command-line options for ``mri_binarize``."""
    in_file = File(
        exists=True,
        argstr='--i %s',
        mandatory=True,
        copyfile=False,
        desc='input volume')
    min = traits.Float(
        argstr='--min %f', xor=['wm_ven_csf'], desc='min thresh')
    max = traits.Float(
        argstr='--max %f', xor=['wm_ven_csf'], desc='max thresh')
    rmin = traits.Float(
        argstr='--rmin %f', desc='compute min based on rmin*globalmean')
    rmax = traits.Float(
        argstr='--rmax %f', desc='compute max based on rmax*globalmean')
    match = traits.List(
        traits.Int, argstr='--match %d...', desc='match instead of threshold')
    wm = traits.Bool(
        argstr='--wm',
        desc='set match vals to 2 and 41 (aseg for cerebral WM)')
    ventricles = traits.Bool(
        argstr='--ventricles',
        desc='set match vals those for aseg ventricles+choroid (not 4th)')
    wm_ven_csf = traits.Bool(
        argstr='--wm+vcsf',
        xor=['min', 'max'],
        desc='WM and ventricular CSF, including choroid (not 4th)')
    binary_file = File(
        argstr='--o %s', genfile=True, desc='binary output volume')
    out_type = traits.Enum(
        'nii', 'nii.gz', 'mgz', argstr='', desc='output file type')
    count_file = traits.Either(
        traits.Bool,
        File,
        argstr='--count %s',
        desc='save number of hits in ascii file (hits, ntotvox, pct)')
    bin_val = traits.Int(
        argstr='--binval %d',
        desc='set vox within thresh to val (default is 1)')
    bin_val_not = traits.Int(
        argstr='--binvalnot %d',
        desc='set vox outside range to val (default is 0)')
    invert = traits.Bool(argstr='--inv', desc='set binval=0, binvalnot=1')
    frame_no = traits.Int(
        argstr='--frame %s', desc='use 0-based frame of input (default is 0)')
    merge_file = File(
        exists=True, argstr='--merge %s', desc='merge with mergevol')
    # Fixed: argstr was the literal '--mask maskvol', so the actual mask
    # file path was never substituted onto the command line.
    mask_file = File(
        exists=True, argstr='--mask %s', desc='must be within mask')
    mask_thresh = traits.Float(
        argstr='--mask-thresh %f', desc='set thresh for mask')
    abs = traits.Bool(
        argstr='--abs', desc='take abs of invol first (ie, make unsigned)')
    bin_col_num = traits.Bool(
        argstr='--bincol',
        desc='set binarized voxel value to its column number')
    zero_edges = traits.Bool(
        argstr='--zero-edges', desc='zero the edge voxels')
    zero_slice_edge = traits.Bool(
        argstr='--zero-slice-edges', desc='zero the edge slice voxels')
    dilate = traits.Int(
        argstr='--dilate %d', desc='niters: dilate binarization in 3D')
    erode = traits.Int(
        argstr='--erode %d',
        desc='nerode: erode binarization in 3D (after any dilation)')
    erode2d = traits.Int(
        argstr='--erode2d %d',
        desc='nerode2d: erode binarization in 2D (after any 3D erosion)')
class BinarizeOutputSpec(TraitedSpec):
    """Outputs produced by ``mri_binarize``."""
    binary_file = File(exists=True, desc='binarized output volume')
    count_file = File(desc='ascii file containing number of hits')
class Binarize(FSCommand):
    """Threshold an input volume with FreeSurfer ``mri_binarize``.

    Examples
    --------
    >>> binvol = Binarize(in_file='structural.nii', min=10, binary_file='foo_out.nii')
    >>> binvol.cmdline
    'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000'
    """

    _cmd = 'mri_binarize'
    input_spec = BinarizeInputSpec
    output_spec = BinarizeOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        binfile = self.inputs.binary_file
        if not isdefined(binfile):
            # Derive a default output name from the input volume; an
            # explicit out_type replaces the extension as well.
            if isdefined(self.inputs.out_type):
                suffix = '.'.join(('_thresh', self.inputs.out_type))
                binfile = fname_presuffix(
                    self.inputs.in_file,
                    newpath=os.getcwd(),
                    suffix=suffix,
                    use_ext=False)
            else:
                binfile = fname_presuffix(
                    self.inputs.in_file, newpath=os.getcwd(),
                    suffix='_thresh')
        outputs['binary_file'] = os.path.abspath(binfile)
        countfile = self.inputs.count_file
        if isdefined(countfile):
            if isinstance(countfile, bool):
                # True means "auto-name the count file"; False means none.
                if countfile:
                    outputs['count_file'] = fname_presuffix(
                        self.inputs.in_file,
                        suffix='_count.txt',
                        newpath=os.getcwd(),
                        use_ext=False)
            else:
                outputs['count_file'] = countfile
        return outputs

    def _format_arg(self, name, spec, value):
        if name == 'count_file':
            if isinstance(value, bool):
                fname = self._list_outputs()[name]
            else:
                fname = value
            return spec.argstr % fname
        if name == 'out_type':
            # out_type only influences the generated file name; it adds
            # nothing to the command line itself.
            return ''
        return super(Binarize, self)._format_arg(name, spec, value)

    def _gen_filename(self, name):
        return self._list_outputs()[name] if name == 'binary_file' else None
class ConcatenateInputSpec(FSTraitedSpec):
    """Command-line options for ``mri_concat``."""
    in_files = InputMultiPath(
        File(exists=True),
        desc='Individual volumes to be concatenated',
        argstr='--i %s...',
        mandatory=True)
    concatenated_file = File(
        desc='Output volume', argstr='--o %s', genfile=True)
    sign = traits.Enum(
        'abs',
        'pos',
        'neg',
        argstr='--%s',
        desc='Take only pos or neg voxles from input, or take abs')
    stats = traits.Enum(
        'sum',
        'var',
        'std',
        'max',
        'min',
        'mean',
        argstr='--%s',
        desc='Compute the sum, var, std, max, min or mean of the input volumes'
    )
    paired_stats = traits.Enum(
        'sum',
        'avg',
        'diff',
        'diff-norm',
        'diff-norm1',
        'diff-norm2',
        argstr='--paired-%s',
        desc='Compute paired sum, avg, or diff')
    gmean = traits.Int(
        argstr='--gmean %d',
        desc='create matrix to average Ng groups, Nper=Ntot/Ng')
    mean_div_n = traits.Bool(
        argstr='--mean-div-n', desc='compute mean/nframes (good for var)')
    multiply_by = traits.Float(
        argstr='--mul %f', desc='Multiply input volume by some amount')
    add_val = traits.Float(
        argstr='--add %f', desc='Add some amount to the input volume')
    multiply_matrix_file = File(
        exists=True,
        argstr='--mtx %s',
        desc='Multiply input by an ascii matrix in file')
    combine = traits.Bool(
        argstr='--combine',
        desc='Combine non-zero values into single frame volume')
    keep_dtype = traits.Bool(
        argstr='--keep-datatype',
        desc='Keep voxelwise precision type (default is float')
    max_bonfcor = traits.Bool(
        argstr='--max-bonfcor',
        desc='Compute max and bonferroni correct (assumes -log10(ps))')
    max_index = traits.Bool(
        argstr='--max-index',
        desc='Compute the index of max voxel in concatenated volumes')
    mask_file = File(
        exists=True, argstr='--mask %s', desc='Mask input with a volume')
    vote = traits.Bool(
        argstr='--vote',
        desc='Most frequent value at each voxel and fraction of occurances')
    sort = traits.Bool(
        argstr='--sort', desc='Sort each voxel by ascending frame value')
class ConcatenateOutputSpec(TraitedSpec):
    """Outputs produced by ``mri_concat``."""
    concatenated_file = File(
        exists=True, desc='Path/name of the output volume')
class Concatenate(FSCommand):
    """Combine several volumes into one with FreeSurfer ``mri_concat``.

    Volumes can be concatenated frame-wise, or a variety of statistics
    can be computed across the inputs.

    Examples
    --------
    Combine two input volumes into one volume with two frames

    >>> concat = Concatenate()
    >>> concat.inputs.in_files = ['cont1.nii', 'cont2.nii']
    >>> concat.inputs.concatenated_file = 'bar.nii'
    >>> concat.cmdline
    'mri_concat --o bar.nii --i cont1.nii --i cont2.nii'
    """

    _cmd = 'mri_concat'
    input_spec = ConcatenateInputSpec
    output_spec = ConcatenateOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        if isdefined(self.inputs.concatenated_file):
            out_name = self.inputs.concatenated_file
        else:
            out_name = 'concat_output.nii.gz'
        # join() leaves an already-absolute out_name untouched.
        outputs['concatenated_file'] = os.path.join(os.getcwd(), out_name)
        return outputs

    def _gen_filename(self, name):
        if name != 'concatenated_file':
            return None
        return self._list_outputs()[name]
class SegStatsInputSpec(FSTraitedSpec):
    """Command-line options for ``mri_segstats``."""
    _xor_inputs = ('segmentation_file', 'annot', 'surf_label')
    segmentation_file = File(
        exists=True,
        argstr='--seg %s',
        xor=_xor_inputs,
        mandatory=True,
        desc='segmentation volume path')
    annot = traits.Tuple(
        traits.Str,
        traits.Enum('lh', 'rh'),
        traits.Str,
        argstr='--annot %s %s %s',
        xor=_xor_inputs,
        mandatory=True,
        desc='subject hemi parc : use surface parcellation')
    surf_label = traits.Tuple(
        traits.Str,
        traits.Enum('lh', 'rh'),
        traits.Str,
        argstr='--slabel %s %s %s',
        xor=_xor_inputs,
        mandatory=True,
        desc='subject hemi label : use surface label')
    summary_file = File(
        argstr='--sum %s',
        genfile=True,
        position=-1,
        desc='Segmentation stats summary table file')
    partial_volume_file = File(
        exists=True, argstr='--pv %s', desc='Compensate for partial voluming')
    in_file = File(
        exists=True,
        argstr='--i %s',
        desc='Use the segmentation to report stats on this volume')
    frame = traits.Int(
        argstr='--frame %d', desc='Report stats on nth frame of input volume')
    multiply = traits.Float(argstr='--mul %f', desc='multiply input by val')
    calc_snr = traits.Bool(
        argstr='--snr', desc='save mean/std as extra column in output table')
    calc_power = traits.Enum(
        'sqr',
        'sqrt',
        argstr='--%s',
        desc='Compute either the sqr or the sqrt of the input')
    _ctab_inputs = ('color_table_file', 'default_color_table',
                    'gca_color_table')
    color_table_file = File(
        exists=True,
        argstr='--ctab %s',
        xor=_ctab_inputs,
        desc='color table file with seg id names')
    default_color_table = traits.Bool(
        argstr='--ctab-default',
        xor=_ctab_inputs,
        desc='use $FREESURFER_HOME/FreeSurferColorLUT.txt')
    gca_color_table = File(
        exists=True,
        argstr='--ctab-gca %s',
        xor=_ctab_inputs,
        desc='get color table from GCA (CMA)')
    segment_id = traits.List(
        argstr='--id %s...', desc='Manually specify segmentation ids')
    exclude_id = traits.Int(
        argstr='--excludeid %d', desc='Exclude seg id from report')
    exclude_ctx_gm_wm = traits.Bool(
        argstr='--excl-ctxgmwm', desc='exclude cortical gray and white matter')
    wm_vol_from_surf = traits.Bool(
        argstr='--surf-wm-vol', desc='Compute wm volume from surf')
    cortex_vol_from_surf = traits.Bool(
        argstr='--surf-ctx-vol', desc='Compute cortex volume from surf')
    non_empty_only = traits.Bool(
        argstr='--nonempty', desc='Only report nonempty segmentations')
    empty = traits.Bool(
        argstr="--empty",
        desc="Report on segmentations listed in the color table")
    mask_file = File(
        exists=True, argstr='--mask %s', desc='Mask volume (same size as seg')
    mask_thresh = traits.Float(
        argstr='--maskthresh %f',
        desc='binarize mask with this threshold <0.5>')
    # Fixed: '--masksign %s' was passed positionally (becoming a bogus
    # allowed enum value) instead of as argstr, so the flag was never
    # rendered on the command line.
    mask_sign = traits.Enum(
        'abs',
        'pos',
        'neg',
        argstr='--masksign %s',
        desc='Sign for mask threshold: pos, neg, or abs')
    # Fixed: the flag string was passed positionally (as the trait
    # default) instead of as argstr.
    mask_frame = traits.Int(
        argstr='--maskframe %d',
        requires=['mask_file'],
        desc='Mask with this (0 based) frame of the mask volume')
    mask_invert = traits.Bool(
        argstr='--maskinvert', desc='Invert binarized mask volume')
    mask_erode = traits.Int(
        argstr='--maskerode %d', desc='Erode mask by some amount')
    brain_vol = traits.Enum(
        'brain-vol-from-seg',
        'brainmask',
        argstr='--%s',
        desc=
        'Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg``'
    )
    brainmask_file = File(
        argstr="--brainmask %s",
        exists=True,
        desc=
        "Load brain mask and compute the volume of the brain as the non-zero voxels in this volume"
    )
    etiv = traits.Bool(
        argstr='--etiv', desc='Compute ICV from talairach transform')
    # Fixed: '--%s-only' was passed positionally as a bogus enum value
    # instead of as argstr, so '--etiv-only'/'--old-etiv-only' could
    # never be emitted.
    etiv_only = traits.Enum(
        'etiv',
        'old-etiv',
        argstr='--%s-only',
        desc='Compute etiv and exit. Use ``etiv`` or ``old-etiv``')
    avgwf_txt_file = traits.Either(
        traits.Bool,
        File,
        argstr='--avgwf %s',
        desc='Save average waveform into file (bool or filename)')
    avgwf_file = traits.Either(
        traits.Bool,
        File,
        argstr='--avgwfvol %s',
        desc='Save as binary volume (bool or filename)')
    sf_avg_file = traits.Either(
        traits.Bool,
        File,
        argstr='--sfavg %s',
        desc='Save mean across space and time')
    vox = traits.List(
        traits.Int,
        argstr='--vox %s',
        desc='Replace seg with all 0s except at C R S (three int inputs)')
    supratent = traits.Bool(
        argstr="--supratent", desc="Undocumented input flag")
    subcort_gm = traits.Bool(
        argstr="--subcortgray",
        desc="Compute volume of subcortical gray matter")
    total_gray = traits.Bool(
        argstr="--totalgray", desc="Compute volume of total gray matter")
    euler = traits.Bool(
        argstr="--euler",
        desc=
        "Write out number of defect holes in orig.nofix based on the euler number"
    )
    in_intensity = File(
        argstr="--in %s --in-intensity-name %s",
        desc="Undocumented input norm.mgz file")
    intensity_units = traits.Enum(
        'MR',
        argstr="--in-intensity-units %s",
        requires=["in_intensity"],
        desc="Intensity units")
class SegStatsOutputSpec(TraitedSpec):
    """Outputs produced by ``mri_segstats`` (see :class:`SegStats`)."""
    summary_file = File(
        exists=True, desc='Segmentation summary statistics table')
    avgwf_txt_file = File(
        desc='Text file with functional statistics averaged over segs')
    avgwf_file = File(
        desc='Volume with functional statistics averaged over segs')
    sf_avg_file = File(
        desc='Text file with func statistics averaged over segs and framss')
class SegStats(FSCommand):
    """Use FreeSurfer mri_segstats for ROI analysis
    Examples
    --------
    >>> import nipype.interfaces.freesurfer as fs
    >>> ss = fs.SegStats()
    >>> ss.inputs.annot = ('PWS04', 'lh', 'aparc')
    >>> ss.inputs.in_file = 'functional.nii'
    >>> ss.inputs.subjects_dir = '.'
    >>> ss.inputs.avgwf_txt_file = 'avgwf.txt'
    >>> ss.inputs.summary_file = 'summary.stats'
    >>> ss.cmdline
    'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats'
    """
    _cmd = 'mri_segstats'
    input_spec = SegStatsInputSpec
    output_spec = SegStatsOutputSpec
    def _list_outputs(self):
        # Resolve the output paths. Boolean avgwf/sfavg inputs are turned
        # into filenames derived from the segmentation/annot/label source.
        outputs = self.output_spec().get()
        if isdefined(self.inputs.summary_file):
            outputs['summary_file'] = os.path.abspath(self.inputs.summary_file)
        else:
            # Default summary location when none was requested.
            outputs['summary_file'] = os.path.join(os.getcwd(),
                                                   'summary.stats')
        suffices = dict(
            avgwf_txt_file='_avgwf.txt',
            avgwf_file='_avgwf.nii.gz',
            sf_avg_file='sfavg.txt')
        # The stem 'src' comes from whichever source input is set; later
        # checks deliberately override earlier ones.
        if isdefined(self.inputs.segmentation_file):
            _, src = os.path.split(self.inputs.segmentation_file)
        if isdefined(self.inputs.annot):
            src = '_'.join(self.inputs.annot)
        if isdefined(self.inputs.surf_label):
            src = '_'.join(self.inputs.surf_label)
        # NOTE(review): if none of segmentation_file/annot/surf_label is
        # defined while a boolean avgwf/sfavg input is set, 'src' is
        # unbound and a NameError results — confirm the input spec's
        # mandatory/xor constraints make that unreachable.
        for name, suffix in list(suffices.items()):
            value = getattr(self.inputs, name)
            if isdefined(value):
                if isinstance(value, bool):
                    # True means "auto-name the file from the source stem".
                    outputs[name] = fname_presuffix(
                        src, suffix=suffix, newpath=os.getcwd(), use_ext=False)
                else:
                    outputs[name] = os.path.abspath(value)
        return outputs
    def _format_arg(self, name, spec, value):
        # Relative output names are prefixed with './' so mri_segstats
        # writes next to the cwd instead of the subjects dir.
        if name in ('summary_file', 'avgwf_txt_file'):
            if not isinstance(value, bool):
                if not os.path.isabs(value):
                    value = os.path.join('.', value)
        # Note the fall-through: summary_file continues to the super call
        # below with the possibly-rewritten value.
        if name in ['avgwf_txt_file', 'avgwf_file', 'sf_avg_file']:
            if isinstance(value, bool):
                fname = self._list_outputs()[name]
            else:
                fname = value
            return spec.argstr % fname
        elif name == 'in_intensity':
            # Second placeholder of the argstr: intensity name is the
            # basename without its .mgz extension.
            intensity_name = os.path.basename(
                self.inputs.in_intensity).replace('.mgz', '')
            return spec.argstr % (value, intensity_name)
        return super(SegStats, self)._format_arg(name, spec, value)
    def _gen_filename(self, name):
        # Only summary_file supports automatic name generation.
        if name == 'summary_file':
            return self._list_outputs()[name]
        return None
class SegStatsReconAllInputSpec(SegStatsInputSpec):
    """Inputs for :class:`SegStatsReconAll`: SegStats plus the implicit
    recon-all files that get staged into the subjects dir."""
    # recon-all input requirements
    subject_id = traits.String(
        'subject_id',
        usedefault=True,
        argstr="--subject %s",
        mandatory=True,
        desc="Subject id being processed")
    # implicit
    ribbon = traits.File(
        mandatory=True, exists=True, desc="Input file mri/ribbon.mgz")
    presurf_seg = File(exists=True, desc="Input segmentation volume")
    transform = File(mandatory=True, exists=True, desc="Input transform file")
    lh_orig_nofix = File(
        mandatory=True, exists=True, desc="Input lh.orig.nofix")
    rh_orig_nofix = File(
        mandatory=True, exists=True, desc="Input rh.orig.nofix")
    lh_white = File(
        mandatory=True,
        exists=True,
        desc="Input file must be <subject_id>/surf/lh.white")
    rh_white = File(
        mandatory=True,
        exists=True,
        desc="Input file must be <subject_id>/surf/rh.white")
    lh_pial = File(
        mandatory=True,
        exists=True,
        desc="Input file must be <subject_id>/surf/lh.pial")
    rh_pial = File(
        mandatory=True,
        exists=True,
        desc="Input file must be <subject_id>/surf/rh.pial")
    aseg = File(exists=True, desc="Mandatory implicit input in 5.3")
    copy_inputs = traits.Bool(desc="If running as a node, set this to True " +
                              "otherwise, this will copy the implicit inputs "
                              + "to the node directory.")
class SegStatsReconAll(SegStats):
    """
    This class inherits SegStats and modifies it for use in a recon-all workflow.
    This implementation mandates implicit inputs that SegStats.
    To ensure backwards compatability of SegStats, this class was created.
    Examples
    ========
    >>> from nipype.interfaces.freesurfer import SegStatsReconAll
    >>> segstatsreconall = SegStatsReconAll()
    >>> segstatsreconall.inputs.annot = ('PWS04', 'lh', 'aparc')
    >>> segstatsreconall.inputs.avgwf_txt_file = 'avgwf.txt'
    >>> segstatsreconall.inputs.summary_file = 'summary.stats'
    >>> segstatsreconall.inputs.subject_id = '10335'
    >>> segstatsreconall.inputs.ribbon = 'wm.mgz'
    >>> segstatsreconall.inputs.transform = 'trans.mat'
    >>> segstatsreconall.inputs.presurf_seg = 'wm.mgz'
    >>> segstatsreconall.inputs.lh_orig_nofix = 'lh.pial'
    >>> segstatsreconall.inputs.rh_orig_nofix = 'lh.pial'
    >>> segstatsreconall.inputs.lh_pial = 'lh.pial'
    >>> segstatsreconall.inputs.rh_pial = 'lh.pial'
    >>> segstatsreconall.inputs.lh_white = 'lh.pial'
    >>> segstatsreconall.inputs.rh_white = 'lh.pial'
    >>> segstatsreconall.inputs.empty = True
    >>> segstatsreconall.inputs.brain_vol = 'brain-vol-from-seg'
    >>> segstatsreconall.inputs.exclude_ctx_gm_wm = True
    >>> segstatsreconall.inputs.supratent = True
    >>> segstatsreconall.inputs.subcort_gm = True
    >>> segstatsreconall.inputs.etiv = True
    >>> segstatsreconall.inputs.wm_vol_from_surf = True
    >>> segstatsreconall.inputs.cortex_vol_from_surf = True
    >>> segstatsreconall.inputs.total_gray = True
    >>> segstatsreconall.inputs.euler = True
    >>> segstatsreconall.inputs.exclude_id = 0
    >>> segstatsreconall.cmdline
    'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --brain-vol-from-seg --surf-ctx-vol --empty --etiv --euler --excl-ctxgmwm --excludeid 0 --subcortgray --subject 10335 --supratent --totalgray --surf-wm-vol --sum ./summary.stats'
    """
    input_spec = SegStatsReconAllInputSpec
    output_spec = SegStatsOutputSpec
    def _format_arg(self, name, spec, value):
        # Only the basename is passed; run() copies the file into the
        # subject's mri/ directory first.
        if name == 'brainmask_file':
            return spec.argstr % os.path.basename(value)
        return super(SegStatsReconAll, self)._format_arg(name, spec, value)
    def run(self, **inputs):
        # When run as a standalone node, stage every implicit recon-all
        # input into a fresh subjects_dir rooted at the cwd.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            copy2subjdir(self, self.inputs.lh_orig_nofix, 'surf',
                         'lh.orig.nofix')
            copy2subjdir(self, self.inputs.rh_orig_nofix, 'surf',
                         'rh.orig.nofix')
            copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white')
            copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white')
            copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial')
            copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial')
            copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz')
            copy2subjdir(self, self.inputs.presurf_seg, 'mri',
                         'aseg.presurf.mgz')
            copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.mgz')
            copy2subjdir(self, self.inputs.transform,
                         os.path.join('mri', 'transforms'), 'talairach.xfm')
            copy2subjdir(self, self.inputs.in_intensity, 'mri')
            copy2subjdir(self, self.inputs.brainmask_file, 'mri')
        return super(SegStatsReconAll, self).run(**inputs)
class Label2VolInputSpec(FSTraitedSpec):
    """Inputs for :class:`Label2Vol` (``mri_label2vol``). Exactly one of
    label_file/annot_file/seg_file/aparc_aseg must be provided."""
    label_file = InputMultiPath(
        File(exists=True),
        argstr='--label %s...',
        xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
        copyfile=False,
        mandatory=True,
        desc='list of label files')
    annot_file = File(
        exists=True,
        argstr='--annot %s',
        xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
        requires=('subject_id', 'hemi'),
        mandatory=True,
        copyfile=False,
        desc='surface annotation file')
    seg_file = File(
        exists=True,
        argstr='--seg %s',
        xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
        mandatory=True,
        copyfile=False,
        desc='segmentation file')
    aparc_aseg = traits.Bool(
        argstr='--aparc+aseg',
        xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
        mandatory=True,
        desc='use aparc+aseg.mgz in subjectdir as seg')
    template_file = File(
        exists=True,
        argstr='--temp %s',
        mandatory=True,
        desc='output template volume')
    reg_file = File(
        exists=True,
        argstr='--reg %s',
        xor=('reg_file', 'reg_header', 'identity'),
        desc='tkregister style matrix VolXYZ = R*LabelXYZ')
    reg_header = File(
        exists=True,
        argstr='--regheader %s',
        xor=('reg_file', 'reg_header', 'identity'),
        desc='label template volume')
    identity = traits.Bool(
        argstr='--identity',
        xor=('reg_file', 'reg_header', 'identity'),
        desc='set R=I')
    invert_mtx = traits.Bool(
        argstr='--invertmtx', desc='Invert the registration matrix')
    fill_thresh = traits.Range(
        0., 1., argstr='--fillthresh %g', desc='thresh : between 0 and 1')
    label_voxel_volume = traits.Float(
        argstr='--labvoxvol %f', desc='volume of each label point (def 1mm3)')
    proj = traits.Tuple(
        traits.Enum('abs', 'frac'),
        traits.Float,
        traits.Float,
        traits.Float,
        argstr='--proj %s %f %f %f',
        requires=('subject_id', 'hemi'),
        desc='project along surface normal')
    subject_id = traits.Str(argstr='--subject %s', desc='subject id')
    hemi = traits.Enum(
        'lh', 'rh', argstr='--hemi %s', desc='hemisphere to use lh or rh')
    surface = traits.Str(
        argstr='--surf %s', desc='use surface instead of white')
    vol_label_file = File(argstr='--o %s', genfile=True, desc='output volume')
    label_hit_file = File(
        argstr='--hits %s', desc='file with each frame is nhits for a label')
    map_label_stat = File(
        argstr='--label-stat %s',
        desc='map the label stats field into the vol')
    native_vox2ras = traits.Bool(
        argstr='--native-vox2ras',
        desc='use native vox2ras xform instead of tkregister-style')
class Label2VolOutputSpec(TraitedSpec):
    """Outputs of :class:`Label2Vol`."""
    vol_label_file = File(exists=True, desc='output volume')
class Label2Vol(FSCommand):
    """Make a binary volume from a Freesurfer label
    Examples
    --------
    >>> binvol = Label2Vol(label_file='cortex.label', template_file='structural.nii', reg_file='register.dat', fill_thresh=0.5, vol_label_file='foo_out.nii')
    >>> binvol.cmdline
    'mri_label2vol --fillthresh 0.5 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii'
    """
    _cmd = 'mri_label2vol'
    input_spec = Label2VolInputSpec
    output_spec = Label2VolOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        volume = self.inputs.vol_label_file
        if not isdefined(volume):
            # Derive the output stem from whichever source input was set
            # (the input spec's xor guarantees exactly one).
            for attr in ('label_file', 'annot_file', 'seg_file'):
                candidate = getattr(self.inputs, attr)
                if not isdefined(candidate):
                    continue
                if isinstance(candidate, list):
                    candidate = candidate[0]
                src = os.path.basename(candidate)
            if isdefined(self.inputs.aparc_aseg):
                src = 'aparc+aseg.mgz'
            volume = fname_presuffix(
                src, suffix='_vol.nii.gz', newpath=os.getcwd(), use_ext=False)
        outputs['vol_label_file'] = volume
        return outputs

    def _gen_filename(self, name):
        # Only the output volume name is auto-generated.
        return self._list_outputs()[name] if name == 'vol_label_file' else None
class MS_LDAInputSpec(FSTraitedSpec):
    """Inputs for :class:`MS_LDA` (``mri_ms_LDA``)."""
    lda_labels = traits.List(
        traits.Int(),
        argstr='-lda %s',
        mandatory=True,
        minlen=2,
        maxlen=2,
        sep=' ',
        desc='pair of class labels to optimize')
    weight_file = traits.File(
        argstr='-weight %s',
        mandatory=True,
        desc='filename for the LDA weights (input or output)')
    vol_synth_file = traits.File(
        exists=False,
        argstr='-synth %s',
        mandatory=True,
        desc=('filename for the synthesized output '
              'volume'))
    label_file = traits.File(
        exists=True, argstr='-label %s', desc='filename of the label volume')
    mask_file = traits.File(
        exists=True,
        argstr='-mask %s',
        desc='filename of the brain mask volume')
    shift = traits.Int(
        argstr='-shift %d',
        desc='shift all values equal to the given value to zero')
    conform = traits.Bool(
        argstr='-conform',
        desc=('Conform the input volumes (brain mask '
              'typically already conformed)'))
    use_weights = traits.Bool(
        argstr='-W',
        desc=('Use the weights from a previously '
              'generated weight file'))
    images = InputMultiPath(
        File(exists=True),
        argstr='%s',
        mandatory=True,
        copyfile=False,
        desc='list of input FLASH images',
        position=-1)
class MS_LDAOutputSpec(TraitedSpec):
    """Outputs of :class:`MS_LDA`: the weight file (when computed) and
    the synthesized volume."""
    weight_file = File(exists=True, desc='')
    vol_synth_file = File(exists=True, desc='')
class MS_LDA(FSCommand):
    """Perform LDA reduction on the intensity space of an arbitrary # of FLASH images
    Examples
    --------
    >>> grey_label = 2
    >>> white_label = 3
    >>> zero_value = 1
    >>> optimalWeights = MS_LDA(lda_labels=[grey_label, white_label], \
                                label_file='label.mgz', weight_file='weights.txt', \
                                shift=zero_value, vol_synth_file='synth_out.mgz', \
                                conform=True, use_weights=True, \
                                images=['FLASH1.mgz', 'FLASH2.mgz', 'FLASH3.mgz'])
    >>> optimalWeights.cmdline
    'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz'
    """
    _cmd = 'mri_ms_LDA'
    input_spec = MS_LDAInputSpec
    output_spec = MS_LDAOutputSpec
    def _list_outputs(self):
        outputs = self._outputs().get()
        # BUG FIX: 'output_synth' is not declared in MS_LDAInputSpec, so
        # the previous unconditional `self.inputs.output_synth` access
        # raised AttributeError. Guard with getattr and fall back to the
        # documented 'vol_synth_file' input.
        output_synth = getattr(self.inputs, 'output_synth', None)
        if output_synth is not None and isdefined(output_synth):
            outputs['vol_synth_file'] = os.path.abspath(output_synth)
        else:
            outputs['vol_synth_file'] = os.path.abspath(
                self.inputs.vol_synth_file)
        # The weight file is an output only when it is being computed,
        # i.e. when pre-existing weights were not supplied.
        if not isdefined(
                self.inputs.use_weights) or self.inputs.use_weights is False:
            outputs['weight_file'] = os.path.abspath(self.inputs.weight_file)
        return outputs
    def _verify_weights_file_exists(self):
        # use_weights=True requires the weights file to already exist.
        if not os.path.exists(os.path.abspath(self.inputs.weight_file)):
            raise traits.TraitError(
                "MS_LDA: use_weights must accompany an existing weights file")
    def _format_arg(self, name, spec, value):
        if name == 'use_weights':
            if self.inputs.use_weights is True:
                self._verify_weights_file_exists()
            else:
                return ''
        # TODO: Fix bug when boolean values are set explicitly to false
        return super(MS_LDA, self)._format_arg(name, spec, value)
    def _gen_filename(self, name):
        # No inputs are auto-generated for this interface.
        pass
class Label2LabelInputSpec(FSTraitedSpec):
    """Inputs for :class:`Label2Label` (``mri_label2label``)."""
    hemisphere = traits.Enum(
        'lh',
        'rh',
        argstr="--hemi %s",
        mandatory=True,
        desc="Input hemisphere")
    subject_id = traits.String(
        'subject_id',
        usedefault=True,
        argstr="--trgsubject %s",
        mandatory=True,
        desc="Target subject")
    sphere_reg = File(
        mandatory=True,
        exists=True,
        desc="Implicit input <hemisphere>.sphere.reg")
    white = File(
        mandatory=True, exists=True, desc="Implicit input <hemisphere>.white")
    source_sphere_reg = File(
        mandatory=True,
        exists=True,
        desc="Implicit input <hemisphere>.sphere.reg")
    source_white = File(
        mandatory=True, exists=True, desc="Implicit input <hemisphere>.white")
    source_label = File(
        argstr="--srclabel %s",
        mandatory=True,
        exists=True,
        desc="Source label")
    source_subject = traits.String(
        argstr="--srcsubject %s", mandatory=True, desc="Source subject name")
    # optional
    out_file = File(
        argstr="--trglabel %s",
        name_source=['source_label'],
        name_template='%s_converted',
        hash_files=False,
        keep_extension=True,
        desc="Target label")
    registration_method = traits.Enum(
        'surface',
        'volume',
        usedefault=True,
        argstr="--regmethod %s",
        desc="Registration method")
    copy_inputs = traits.Bool(
        desc="If running as a node, set this to True." +
        "This will copy the input files to the node " + "directory.")
class Label2LabelOutputSpec(TraitedSpec):
    """Outputs of :class:`Label2Label`."""
    out_file = File(exists=True, desc='Output label')
class Label2Label(FSCommand):
    """
    Converts a label in one subject's space to a label
    in another subject's space using either talairach or spherical
    as an intermediate registration space.
    If a source mask is used, then the input label must have been
    created from a surface (ie, the vertex numbers are valid). The
    format can be anything supported by mri_convert or curv or paint.
    Vertices in the source label that do not meet threshold in the
    mask will be removed from the label.
    Examples
    --------
    >>> from nipype.interfaces.freesurfer import Label2Label
    >>> l2l = Label2Label()
    >>> l2l.inputs.hemisphere = 'lh'
    >>> l2l.inputs.subject_id = '10335'
    >>> l2l.inputs.sphere_reg = 'lh.pial'
    >>> l2l.inputs.white = 'lh.pial'
    >>> l2l.inputs.source_subject = 'fsaverage'
    >>> l2l.inputs.source_label = 'lh-pial.stl'
    >>> l2l.inputs.source_white = 'lh.pial'
    >>> l2l.inputs.source_sphere_reg = 'lh.pial'
    >>> l2l.cmdline
    'mri_label2label --hemi lh --trglabel lh-pial_converted.stl --regmethod surface --srclabel lh-pial.stl --srcsubject fsaverage --trgsubject 10335'
    """
    _cmd = 'mri_label2label'
    input_spec = Label2LabelInputSpec
    output_spec = Label2LabelOutputSpec
    def _list_outputs(self):
        # mri_label2label always writes under <subjects_dir>/<subject>/label.
        outputs = self._outputs().get()
        outputs['out_file'] = os.path.join(self.inputs.subjects_dir,
                                           self.inputs.subject_id, 'label',
                                           self.inputs.out_file)
        return outputs
    def run(self, **inputs):
        # When run as a standalone node, stage the implicit surface files
        # for both the target and the source subject into a fresh
        # subjects_dir rooted at the cwd.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            hemi = self.inputs.hemisphere
            copy2subjdir(self, self.inputs.sphere_reg, 'surf',
                         '{0}.sphere.reg'.format(hemi))
            copy2subjdir(self, self.inputs.white, 'surf',
                         '{0}.white'.format(hemi))
            copy2subjdir(
                self,
                self.inputs.source_sphere_reg,
                'surf',
                '{0}.sphere.reg'.format(hemi),
                subject_id=self.inputs.source_subject)
            copy2subjdir(
                self,
                self.inputs.source_white,
                'surf',
                '{0}.white'.format(hemi),
                subject_id=self.inputs.source_subject)
        # label dir must exist in order for output file to be written
        label_dir = os.path.join(self.inputs.subjects_dir,
                                 self.inputs.subject_id, 'label')
        if not os.path.isdir(label_dir):
            os.makedirs(label_dir)
        return super(Label2Label, self).run(**inputs)
class Label2AnnotInputSpec(FSTraitedSpec):
    """Inputs for :class:`Label2Annot` (``mris_label2annot``)."""
    # required
    hemisphere = traits.Enum(
        'lh',
        'rh',
        argstr="--hemi %s",
        mandatory=True,
        desc="Input hemisphere")
    subject_id = traits.String(
        'subject_id',
        usedefault=True,
        argstr="--s %s",
        mandatory=True,
        desc="Subject name/ID")
    in_labels = traits.List(
        argstr="--l %s...", mandatory=True, desc="List of input label files")
    out_annot = traits.String(
        argstr="--a %s",
        mandatory=True,
        desc="Name of the annotation to create")
    orig = File(exists=True, mandatory=True, desc="implicit {hemisphere}.orig")
    # optional
    keep_max = traits.Bool(
        argstr="--maxstatwinner", desc="Keep label with highest 'stat' value")
    verbose_off = traits.Bool(
        argstr="--noverbose",
        desc="Turn off overlap and stat override messages")
    color_table = File(
        argstr="--ctab %s",
        exists=True,
        desc=
        "File that defines the structure names, their indices, and their color"
    )
    copy_inputs = traits.Bool(
        desc="copy implicit inputs and create a temp subjects_dir")
class Label2AnnotOutputSpec(TraitedSpec):
    """Outputs of :class:`Label2Annot`."""
    out_file = File(exists=True, desc='Output annotation file')
class Label2Annot(FSCommand):
    """
    Converts a set of surface labels to an annotation file
    Examples
    --------
    >>> from nipype.interfaces.freesurfer import Label2Annot
    >>> l2a = Label2Annot()
    >>> l2a.inputs.hemisphere = 'lh'
    >>> l2a.inputs.subject_id = '10335'
    >>> l2a.inputs.in_labels = ['lh.aparc.label']
    >>> l2a.inputs.orig = 'lh.pial'
    >>> l2a.inputs.out_annot = 'test'
    >>> l2a.cmdline
    'mris_label2annot --hemi lh --l lh.aparc.label --a test --s 10335'
    """
    _cmd = 'mris_label2annot'
    input_spec = Label2AnnotInputSpec
    output_spec = Label2AnnotOutputSpec
    def run(self, **inputs):
        # When run as a standalone node, stage the implicit <hemi>.orig
        # surface into a fresh subjects_dir rooted at the cwd.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            copy2subjdir(
                self,
                self.inputs.orig,
                folder='surf',
                basename='{0}.orig'.format(self.inputs.hemisphere))
        # label dir must exist in order for output file to be written
        label_dir = os.path.join(self.inputs.subjects_dir,
                                 self.inputs.subject_id, 'label')
        if not os.path.isdir(label_dir):
            os.makedirs(label_dir)
        return super(Label2Annot, self).run(**inputs)
    def _list_outputs(self):
        # Output is always <subjects_dir>/<subject>/label/<hemi>.<name>.annot
        outputs = self._outputs().get()
        outputs["out_file"] = os.path.join(
            str(self.inputs.subjects_dir), str(self.inputs.subject_id),
            'label',
            str(self.inputs.hemisphere) + '.' + str(self.inputs.out_annot) +
            '.annot')
        return outputs
class SphericalAverageInputSpec(FSTraitedSpec):
    """Inputs for :class:`SphericalAverage` (``mris_spherical_average``).
    Note the negative ``position`` values: arguments are appended in
    reverse at the end of the command line."""
    out_file = File(
        argstr="%s",
        genfile=True,
        exists=False,
        position=-1,
        desc="Output filename")
    in_average = traits.Directory(
        argstr="%s",
        exists=True,
        genfile=True,
        position=-2,
        desc="Average subject")
    in_surf = File(
        argstr="%s",
        mandatory=True,
        exists=True,
        position=-3,
        desc="Input surface file")
    hemisphere = traits.Enum(
        'lh',
        'rh',
        argstr="%s",
        mandatory=True,
        position=-4,
        desc="Input hemisphere")
    fname = traits.String(
        argstr="%s",
        mandatory=True,
        position=-5,
        desc="""Filename from the average subject directory.
        Example: to use rh.entorhinal.label as the input label
        filename, set fname to 'rh.entorhinal' and which to
        'label'. The program will then search for
        '{in_average}/label/rh.entorhinal.label'
        """)
    which = traits.Enum(
        'coords',
        'label',
        'vals',
        'curv',
        'area',
        argstr="%s",
        mandatory=True,
        position=-6,
        desc="No documentation")
    subject_id = traits.String(
        argstr="-o %s", mandatory=True, desc="Output subject id")
    # optional
    erode = traits.Int(argstr="-erode %d", desc="Undocumented")
    in_orig = File(
        argstr="-orig %s", exists=True, desc="Original surface filename")
    threshold = traits.Float(argstr="-t %.1f", desc="Undocumented")
class SphericalAverageOutputSpec(TraitedSpec):
    """Outputs of :class:`SphericalAverage`."""
    out_file = File(exists=False, desc='Output label')
class SphericalAverage(FSCommand):
    """
    This program will add a template into an average surface.
    Examples
    --------
    >>> from nipype.interfaces.freesurfer import SphericalAverage
    >>> sphericalavg = SphericalAverage()
    >>> sphericalavg.inputs.out_file = 'test.out'
    >>> sphericalavg.inputs.in_average = '.'
    >>> sphericalavg.inputs.in_surf = 'lh.pial'
    >>> sphericalavg.inputs.hemisphere = 'lh'
    >>> sphericalavg.inputs.fname = 'lh.entorhinal'
    >>> sphericalavg.inputs.which = 'label'
    >>> sphericalavg.inputs.subject_id = '10335'
    >>> sphericalavg.inputs.erode = 2
    >>> sphericalavg.inputs.threshold = 5
    >>> sphericalavg.cmdline
    'mris_spherical_average -erode 2 -o 10335 -t 5.0 label lh.entorhinal lh pial . test.out'
    """
    _cmd = 'mris_spherical_average'
    input_spec = SphericalAverageInputSpec
    output_spec = SphericalAverageOutputSpec
    def _format_arg(self, name, spec, value):
        # Surface arguments are passed without a hemisphere prefix
        # (e.g. 'lh.pial' -> 'pial'); the hemisphere is a separate arg.
        if name == 'in_orig' or name == 'in_surf':
            surf = os.path.basename(value)
            for item in ['lh.', 'rh.']:
                surf = surf.replace(item, '')
            return spec.argstr % surf
        return super(SphericalAverage, self)._format_arg(name, spec, value)
    def _gen_filename(self, name):
        # Generate defaults for the 'in_average' directory and 'out_file'.
        if name == 'in_average':
            # BUG FIX: the original also computed
            # os.path.abspath(os.environ.get('FREESURFER_HOME')) into an
            # unused local when the directory was missing, which raised
            # TypeError whenever FREESURFER_HOME was unset. The dead code
            # has been removed; the returned default is unchanged.
            return str(self.inputs.hemisphere) + '.EC_average'
        elif name == 'out_file':
            return self._list_outputs()[name]
        else:
            return None
    def _list_outputs(self):
        outputs = self._outputs().get()
        if isdefined(self.inputs.out_file):
            outputs['out_file'] = os.path.abspath(self.inputs.out_file)
        else:
            # Default into the subject's label directory, mirroring the
            # exvivo naming convention of the average subject.
            out_dir = os.path.join(self.inputs.subjects_dir,
                                   self.inputs.subject_id, 'label')
            if isdefined(self.inputs.in_average):
                basename = os.path.basename(self.inputs.in_average)
                basename = basename.replace('_', '_exvivo_') + '.label'
            else:
                basename = str(
                    self.inputs.hemisphere) + '.EC_exvivo_average.label'
            outputs['out_file'] = os.path.join(out_dir, basename)
        return outputs
| 37.698239 | 239 | 0.598914 |
from __future__ import (print_function, division, unicode_literals,
absolute_import)
import os
from ...utils.filemanip import fname_presuffix, split_filename
from ..base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath,
Directory, isdefined)
from .base import FSCommand, FSTraitedSpec
from .utils import copy2subjdir
__docformat__ = 'restructuredtext'
class MRISPreprocInputSpec(FSTraitedSpec):
    """Inputs for :class:`MRISPreproc` (``mris_preproc``)."""
    out_file = File(argstr='--out %s', genfile=True, desc='output filename')
    target = traits.Str(
        argstr='--target %s', mandatory=True, desc='target subject name')
    hemi = traits.Enum(
        'lh',
        'rh',
        argstr='--hemi %s',
        mandatory=True,
        desc='hemisphere for source and target')
    surf_measure = traits.Str(
        argstr='--meas %s',
        xor=('surf_measure', 'surf_measure_file', 'surf_area'),
        desc='Use subject/surf/hemi.surf_measure as input')
    surf_area = traits.Str(
        argstr='--area %s',
        xor=('surf_measure', 'surf_measure_file', 'surf_area'),
        desc=
        'Extract vertex area from subject/surf/hemi.surfname to use as input.')
    subjects = traits.List(
        argstr='--s %s...',
        xor=('subjects', 'fsgd_file', 'subject_file'),
        desc='subjects from who measures are calculated')
    fsgd_file = File(
        exists=True,
        argstr='--fsgd %s',
        xor=('subjects', 'fsgd_file', 'subject_file'),
        desc='specify subjects using fsgd file')
    subject_file = File(
        exists=True,
        argstr='--f %s',
        xor=('subjects', 'fsgd_file', 'subject_file'),
        desc='file specifying subjects separated by white space')
    surf_measure_file = InputMultiPath(
        File(exists=True),
        argstr='--is %s...',
        xor=('surf_measure', 'surf_measure_file', 'surf_area'),
        desc='file alternative to surfmeas, still requires list of subjects')
    source_format = traits.Str(argstr='--srcfmt %s', desc='source format')
    surf_dir = traits.Str(
        argstr='--surfdir %s', desc='alternative directory (instead of surf)')
    vol_measure_file = InputMultiPath(
        traits.Tuple(File(exists=True), File(exists=True)),
        argstr='--iv %s %s...',
        desc='list of volume measure and reg file tuples')
    proj_frac = traits.Float(
        argstr='--projfrac %s', desc='projection fraction for vol2surf')
    fwhm = traits.Float(
        argstr='--fwhm %f',
        xor=['num_iters'],
        desc='smooth by fwhm mm on the target surface')
    num_iters = traits.Int(
        argstr='--niters %d',
        xor=['fwhm'],
        desc='niters : smooth by niters on the target surface')
    fwhm_source = traits.Float(
        argstr='--fwhm-src %f',
        xor=['num_iters_source'],
        desc='smooth by fwhm mm on the source surface')
    num_iters_source = traits.Int(
        argstr='--niterssrc %d',
        xor=['fwhm_source'],
        desc='niters : smooth by niters on the source surface')
    smooth_cortex_only = traits.Bool(
        argstr='--smooth-cortex-only',
        desc='only smooth cortex (ie, exclude medial wall)')
class MRISPreprocOutputSpec(TraitedSpec):
    """Outputs of :class:`MRISPreproc`."""
    out_file = File(desc='preprocessed output file')
class MRISPreproc(FSCommand):
    """Run ``mris_preproc`` to assemble surface data for group analysis."""
    _cmd = 'mris_preproc'
    input_spec = MRISPreprocInputSpec
    output_spec = MRISPreprocOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        requested = self.inputs.out_file
        if isdefined(requested):
            outputs['out_file'] = requested
        else:
            # Default output name: concat_<hemi>_<target>.mgz in the cwd.
            default_name = 'concat_%s_%s.mgz' % (self.inputs.hemi,
                                                 self.inputs.target)
            outputs['out_file'] = os.path.join(os.getcwd(), default_name)
        return outputs

    def _gen_filename(self, name):
        # Only out_file supports automatic name generation.
        return self._list_outputs()[name] if name == 'out_file' else None
class MRISPreprocReconAllInputSpec(MRISPreprocInputSpec):
    """Inputs for :class:`MRISPreprocReconAll`: MRISPreproc plus the
    implicit recon-all registration files."""
    surf_measure_file = File(
        exists=True,
        argstr='--meas %s',
        xor=('surf_measure', 'surf_measure_file', 'surf_area'),
        desc='file necessary for surfmeas')
    surfreg_files = InputMultiPath(
        File(exists=True),
        argstr="--surfreg %s",
        requires=['lh_surfreg_target', 'rh_surfreg_target'],
        desc="lh and rh input surface registration files")
    lh_surfreg_target = File(
        desc="Implicit target surface registration file",
        requires=['surfreg_files'])
    rh_surfreg_target = File(
        desc="Implicit target surface registration file",
        requires=['surfreg_files'])
    subject_id = traits.String(
        'subject_id',
        argstr='--s %s',
        usedefault=True,
        xor=('subjects', 'fsgd_file', 'subject_file', 'subject_id'),
        desc='subject from whom measures are calculated')
    copy_inputs = traits.Bool(
        desc="If running as a node, set this to True " +
        "this will copy some implicit inputs to the " + "node directory.")
class MRISPreprocReconAll(MRISPreproc):
    """MRISPreproc variant for use inside a recon-all workflow: stages the
    implicit surface-registration inputs and passes hemisphere-stripped
    basenames on the command line."""
    input_spec = MRISPreprocReconAllInputSpec
    def run(self, **inputs):
        # When run as a standalone node, copy the implicit inputs into a
        # fresh subjects_dir rooted at the cwd.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            if isdefined(self.inputs.surf_dir):
                folder = self.inputs.surf_dir
            else:
                folder = 'surf'
            if isdefined(self.inputs.surfreg_files):
                for surfreg in self.inputs.surfreg_files:
                    basename = os.path.basename(surfreg)
                    copy2subjdir(self, surfreg, folder, basename)
                    # Stage the matching target-subject registration file.
                    if basename.startswith('lh.'):
                        copy2subjdir(
                            self,
                            self.inputs.lh_surfreg_target,
                            folder,
                            basename,
                            subject_id=self.inputs.target)
                    else:
                        copy2subjdir(
                            self,
                            self.inputs.rh_surfreg_target,
                            folder,
                            basename,
                            subject_id=self.inputs.target)
            if isdefined(self.inputs.surf_measure_file):
                copy2subjdir(self, self.inputs.surf_measure_file, folder)
        return super(MRISPreprocReconAll, self).run(**inputs)
    @staticmethod
    def _strip_hemi(basename):
        # BUG FIX: the original used basename.lstrip('rh.').lstrip('lh.'),
        # but str.lstrip removes a *character set*, not a prefix, and
        # corrupts names such as 'lh.hires.sphere.reg' (-> 'ires...').
        # Strip the hemisphere prefix explicitly instead.
        if basename.startswith(('lh.', 'rh.')):
            return basename[3:]
        return basename
    def _format_arg(self, name, spec, value):
        # mris_preproc expects the registration/measure names without the
        # hemisphere prefix; run() staged the actual files above.
        if name == 'surfreg_files':
            basename = os.path.basename(value[0])
            return spec.argstr % self._strip_hemi(basename)
        if name == "surf_measure_file":
            basename = os.path.basename(value)
            return spec.argstr % self._strip_hemi(basename)
        return super(MRISPreprocReconAll, self)._format_arg(name, spec, value)
class GLMFitInputSpec(FSTraitedSpec):
    """Inputs for :class:`GLMFit` (``mri_glmfit``)."""
    glm_dir = traits.Str(
        argstr='--glmdir %s', desc='save outputs to dir', genfile=True)
    in_file = File(
        desc='input 4D file', argstr='--y %s', mandatory=True, copyfile=False)
    _design_xor = ('fsgd', 'design', 'one_sample')
    fsgd = traits.Tuple(
        File(exists=True),
        traits.Enum('doss', 'dods'),
        argstr='--fsgd %s %s',
        xor=_design_xor,
        desc='freesurfer descriptor file')
    design = File(
        exists=True,
        argstr='--X %s',
        xor=_design_xor,
        desc='design matrix file')
    contrast = InputMultiPath(
        File(exists=True), argstr='--C %s...', desc='contrast file')
    one_sample = traits.Bool(
        argstr='--osgm',
        xor=('one_sample', 'fsgd', 'design', 'contrast'),
        desc='construct X and C as a one-sample group mean')
    no_contrast_ok = traits.Bool(
        argstr='--no-contrasts-ok',
        desc='do not fail if no contrasts specified')
    per_voxel_reg = InputMultiPath(
        File(exists=True), argstr='--pvr %s...', desc='per-voxel regressors')
    self_reg = traits.Tuple(
        traits.Int,
        traits.Int,
        traits.Int,
        argstr='--selfreg %d %d %d',
        desc='self-regressor from index col row slice')
    weighted_ls = File(
        exists=True,
        argstr='--wls %s',
        xor=('weight_file', 'weight_inv', 'weight_sqrt'),
        desc='weighted least squares')
    fixed_fx_var = File(
        exists=True, argstr='--yffxvar %s', desc='for fixed effects analysis')
    fixed_fx_dof = traits.Int(
        argstr='--ffxdof %d',
        xor=['fixed_fx_dof_file'],
        desc='dof for fixed effects analysis')
    # BUG FIX: this trait holds a file path, so the placeholder must be
    # %s; the original '--ffxdofdat %d' raised TypeError at format time.
    fixed_fx_dof_file = File(
        argstr='--ffxdofdat %s',
        xor=['fixed_fx_dof'],
        desc='text file with dof for fixed effects analysis')
    # NOTE(review): weight_file has no argstr, so it never reaches the
    # command line on its own — confirm whether this is intentional.
    weight_file = File(
        exists=True,
        xor=['weighted_ls'],
        desc='weight for each input at each voxel')
    weight_inv = traits.Bool(
        argstr='--w-inv', desc='invert weights', xor=['weighted_ls'])
    weight_sqrt = traits.Bool(
        argstr='--w-sqrt', desc='sqrt of weights', xor=['weighted_ls'])
    fwhm = traits.Range(
        low=0.0, argstr='--fwhm %f', desc='smooth input by fwhm')
    var_fwhm = traits.Range(
        low=0.0, argstr='--var-fwhm %f', desc='smooth variance by fwhm')
    no_mask_smooth = traits.Bool(
        argstr='--no-mask-smooth', desc='do not mask when smoothing')
    no_est_fwhm = traits.Bool(
        argstr='--no-est-fwhm', desc='turn off FWHM output estimation')
    mask_file = File(exists=True, argstr='--mask %s', desc='binary mask')
    label_file = File(
        exists=True,
        argstr='--label %s',
        xor=['cortex'],
        desc='use label as mask, surfaces only')
    cortex = traits.Bool(
        argstr='--cortex',
        xor=['label_file'],
        desc='use subjects ?h.cortex.label as label')
    invert_mask = traits.Bool(argstr='--mask-inv', desc='invert mask')
    prune = traits.Bool(
        argstr='--prune',
        desc=
        'remove voxels that do not have a non-zero value at each frame (def)')
    # BUG FIX: the xor lists referenced non-existent trait names
    # ('prunethresh'/'noprune'), so the mutual exclusion between no_prune
    # and prune_thresh was never enforced; they now reference the actual
    # trait names.
    no_prune = traits.Bool(
        argstr='--no-prune', xor=['prune_thresh'], desc='do not prune')
    prune_thresh = traits.Float(
        argstr='--prune_thr %f',
        xor=['no_prune'],
        desc='prune threshold. Default is FLT_MIN')
    compute_log_y = traits.Bool(
        argstr='--logy', desc='compute natural log of y prior to analysis')
    save_estimate = traits.Bool(
        argstr='--yhat-save', desc='save signal estimate (yhat)')
    save_residual = traits.Bool(
        argstr='--eres-save', desc='save residual error (eres)')
    save_res_corr_mtx = traits.Bool(
        argstr='--eres-scm',
        desc='save residual error spatial correlation matrix (eres.scm). Big!')
    # The three placeholders are filled by GLMFit._format_arg with
    # subject_id, hemi and surf_geo.
    surf = traits.Bool(
        argstr="--surf %s %s %s",
        requires=["subject_id", "hemi"],
        desc="analysis is on a surface mesh")
    subject_id = traits.Str(desc="subject id for surface geometry")
    hemi = traits.Enum("lh", "rh", desc="surface hemisphere")
    surf_geo = traits.Str(
        "white",
        usedefault=True,
        desc="surface geometry name (e.g. white, pial)")
    simulation = traits.Tuple(
        traits.Enum('perm', 'mc-full', 'mc-z'),
        traits.Int(min=1),
        traits.Float,
        traits.Str,
        argstr='--sim %s %d %f %s',
        desc='nulltype nsim thresh csdbasename')
    sim_sign = traits.Enum(
        'abs', 'pos', 'neg', argstr='--sim-sign %s', desc='abs, pos, or neg')
    uniform = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='--uniform %f %f',
        desc='use uniform distribution instead of gaussian')
    pca = traits.Bool(
        argstr='--pca', desc='perform pca/svd analysis on residual')
    calc_AR1 = traits.Bool(
        argstr='--tar1', desc='compute and save temporal AR1 of residual')
    save_cond = traits.Bool(
        argstr='--save-cond',
        desc='flag to save design matrix condition at each voxel')
    vox_dump = traits.Tuple(
        traits.Int,
        traits.Int,
        traits.Int,
        argstr='--voxdump %d %d %d',
        desc='dump voxel GLM and exit')
    seed = traits.Int(argstr='--seed %d', desc='used for synthesizing noise')
    synth = traits.Bool(argstr='--synth', desc='replace input with gaussian')
    resynth_test = traits.Int(
        argstr='--resynthtest %d', desc='test GLM by resynthsis')
    profile = traits.Int(argstr='--profile %d', desc='niters : test speed')
    force_perm = traits.Bool(
        argstr='--perm-force',
        desc='force perumtation test, even when design matrix is not orthog')
    # BUG FIX: the flag string was passed positionally (becoming the
    # trait's default value) so --diag was never emitted; it is now a
    # proper argstr.
    diag = traits.Int(
        argstr='--diag %d', desc='Gdiag_no : set diagnositc level')
    diag_cluster = traits.Bool(
        argstr='--diag-cluster',
        desc='save sig volume and exit from first sim loop')
    debug = traits.Bool(argstr='--debug', desc='turn on debugging')
    check_opts = traits.Bool(
        argstr='--checkopts',
        desc="don't run anything, just check options and exit")
    allow_repeated_subjects = traits.Bool(
        argstr='--allowsubjrep',
        desc=
        'allow subject names to repeat in the fsgd file (must appear before --fsgd'
    )
    allow_ill_cond = traits.Bool(
        argstr='--illcond', desc='allow ill-conditioned design matrices')
    sim_done_file = File(
        argstr='--sim-done %s', desc='create file when simulation finished')
class GLMFitOutputSpec(TraitedSpec):
    """Files produced by ``mri_glmfit`` inside the GLM output directory."""
    glm_dir = Directory(exists=True, desc="output directory")
    beta_file = File(exists=True, desc="map of regression coefficients")
    error_file = File(desc="map of residual error")
    error_var_file = File(desc="map of residual error variance")
    error_stddev_file = File(desc="map of residual error standard deviation")
    estimate_file = File(desc="map of the estimated Y values")
    mask_file = File(desc="map of the mask used in the analysis")
    fwhm_file = File(desc="text file with estimated smoothness")
    dof_file = File(
        desc="text file with effective degrees-of-freedom for the analysis")
    # The four outputs below hold one path per contrast subdirectory.
    gamma_file = OutputMultiPath(
        desc="map of contrast of regression coefficients")
    gamma_var_file = OutputMultiPath(
        desc="map of regression contrast variance")
    sig_file = OutputMultiPath(desc="map of F-test significance (in -log10p)")
    ftest_file = OutputMultiPath(desc="map of test statistic values")
    # PCA outputs are only produced when the 'pca' input is enabled.
    spatial_eigenvectors = File(
        desc="map of spatial eigenvectors from residual PCA")
    frame_eigenvectors = File(
        desc="matrix of frame eigenvectors from residual PCA")
    singular_values = File(desc="matrix singular values from residual PCA")
    svd_stats_file = File(desc="text file summarizing the residual PCA")
class GLMFit(FSCommand):
    """Fit a general linear model with FreeSurfer's ``mri_glmfit``.

    Output filenames are derived from the GLM output directory; contrast
    outputs live in one subdirectory per contrast matrix.
    """
    _cmd = 'mri_glmfit'
    input_spec = GLMFitInputSpec
    output_spec = GLMFitOutputSpec

    def _format_arg(self, name, spec, value):
        # 'surf' is assembled from three sibling inputs (subject, hemisphere,
        # surface geometry) rather than from its own value.
        if name == "surf":
            _si = self.inputs
            return spec.argstr % (_si.subject_id, _si.hemi, _si.surf_geo)
        return super(GLMFit, self)._format_arg(name, spec, value)

    def _list_outputs(self):
        outputs = self.output_spec().get()
        # Get the top-level output directory
        if not isdefined(self.inputs.glm_dir):
            glmdir = os.getcwd()
        else:
            glmdir = os.path.abspath(self.inputs.glm_dir)
        outputs["glm_dir"] = glmdir

        # Assign the output files that always get created
        outputs["beta_file"] = os.path.join(glmdir, "beta.mgh")
        outputs["error_var_file"] = os.path.join(glmdir, "rvar.mgh")
        outputs["error_stddev_file"] = os.path.join(glmdir, "rstd.mgh")
        outputs["mask_file"] = os.path.join(glmdir, "mask.mgh")
        outputs["fwhm_file"] = os.path.join(glmdir, "fwhm.dat")
        outputs["dof_file"] = os.path.join(glmdir, "dof.dat")

        # Assign the conditional outputs
        if isdefined(self.inputs.save_residual) and self.inputs.save_residual:
            outputs["error_file"] = os.path.join(glmdir, "eres.mgh")
        if isdefined(self.inputs.save_estimate) and self.inputs.save_estimate:
            outputs["estimate_file"] = os.path.join(glmdir, "yhat.mgh")

        # Get the contrast directory name(s).  Default to an empty list so
        # the per-contrast outputs below are well-defined even when neither
        # 'contrast' nor 'one_sample' is set (previously a NameError).
        contrasts = []
        if isdefined(self.inputs.contrast):
            for c in self.inputs.contrast:
                # Contrast matrix files keep their stem; other paths keep
                # their basename.
                if split_filename(c)[2] in [".mat", ".dat", ".mtx", ".con"]:
                    contrasts.append(split_filename(c)[1])
                else:
                    contrasts.append(os.path.split(c)[1])
        elif isdefined(self.inputs.one_sample) and self.inputs.one_sample:
            contrasts = ["osgm"]

        # Add in the contrast images
        outputs["sig_file"] = [
            os.path.join(glmdir, c, "sig.mgh") for c in contrasts
        ]
        outputs["ftest_file"] = [
            os.path.join(glmdir, c, "F.mgh") for c in contrasts
        ]
        outputs["gamma_file"] = [
            os.path.join(glmdir, c, "gamma.mgh") for c in contrasts
        ]
        outputs["gamma_var_file"] = [
            os.path.join(glmdir, c, "gammavar.mgh") for c in contrasts
        ]

        # Add in the PCA results, if relevant
        if isdefined(self.inputs.pca) and self.inputs.pca:
            pcadir = os.path.join(glmdir, "pca-eres")
            outputs["spatial_eigenvectors"] = os.path.join(pcadir, "v.mgh")
            outputs["frame_eigenvectors"] = os.path.join(pcadir, "u.mtx")
            # Bug fix: the key was misspelled 'singluar_values', so this
            # entry never matched the output spec's 'singular_values' trait.
            outputs["singular_values"] = os.path.join(pcadir, "sdiag.mat")
            outputs["svd_stats_file"] = os.path.join(pcadir, "stats.dat")
        return outputs

    def _gen_filename(self, name):
        if name == 'glm_dir':
            return os.getcwd()
        return None
class OneSampleTTest(GLMFit):
    """Convenience ``GLMFit`` variant that pre-enables the one-sample
    group-mean analysis; all other inputs behave exactly as in GLMFit."""
    def __init__(self, **kwargs):
        super(OneSampleTTest, self).__init__(**kwargs)
        # Force the one-sample contrast at construction time.
        self.inputs.one_sample = True
class BinarizeInputSpec(FSTraitedSpec):
    """Inputs for ``mri_binarize`` (threshold/match a volume to binary)."""
    in_file = File(
        exists=True,
        argstr='--i %s',
        mandatory=True,
        copyfile=False,
        desc='input volume')
    min = traits.Float(
        argstr='--min %f', xor=['wm_ven_csf'], desc='min thresh')
    max = traits.Float(
        argstr='--max %f', xor=['wm_ven_csf'], desc='max thresh')
    rmin = traits.Float(
        argstr='--rmin %f', desc='compute min based on rmin*globalmean')
    rmax = traits.Float(
        argstr='--rmax %f', desc='compute max based on rmax*globalmean')
    match = traits.List(
        traits.Int, argstr='--match %d...', desc='match instead of threshold')
    wm = traits.Bool(
        argstr='--wm',
        desc='set match vals to 2 and 41 (aseg for cerebral WM)')
    ventricles = traits.Bool(
        argstr='--ventricles',
        desc='set match vals those for aseg ventricles+choroid (not 4th)')
    wm_ven_csf = traits.Bool(
        argstr='--wm+vcsf',
        xor=['min', 'max'],
        desc='WM and ventricular CSF, including choroid (not 4th)')
    binary_file = File(
        argstr='--o %s', genfile=True, desc='binary output volume')
    out_type = traits.Enum(
        'nii', 'nii.gz', 'mgz', argstr='', desc='output file type')
    count_file = traits.Either(
        traits.Bool,
        File,
        argstr='--count %s',
        desc='save number of hits in ascii file (hits, ntotvox, pct)')
    bin_val = traits.Int(
        argstr='--binval %d',
        desc='set vox within thresh to val (default is 1)')
    bin_val_not = traits.Int(
        argstr='--binvalnot %d',
        desc='set vox outside range to val (default is 0)')
    invert = traits.Bool(argstr='--inv', desc='set binval=0, binvalnot=1')
    # Bug fix: the frame index is an integer, so use %d (was '--frame %s').
    frame_no = traits.Int(
        argstr='--frame %d', desc='use 0-based frame of input (default is 0)')
    merge_file = File(
        exists=True, argstr='--merge %s', desc='merge with mergevol')
    # Bug fix: the argstr was the literal '--mask maskvol', so the chosen
    # mask filename was never substituted into the command line.
    mask_file = File(
        exists=True, argstr='--mask %s', desc='must be within mask')
    mask_thresh = traits.Float(
        argstr='--mask-thresh %f', desc='set thresh for mask')
    abs = traits.Bool(
        argstr='--abs', desc='take abs of invol first (ie, make unsigned)')
    bin_col_num = traits.Bool(
        argstr='--bincol',
        desc='set binarized voxel value to its column number')
    zero_edges = traits.Bool(
        argstr='--zero-edges', desc='zero the edge voxels')
    zero_slice_edge = traits.Bool(
        argstr='--zero-slice-edges', desc='zero the edge slice voxels')
    dilate = traits.Int(
        argstr='--dilate %d', desc='niters: dilate binarization in 3D')
    erode = traits.Int(
        argstr='--erode %d',
        desc='nerode: erode binarization in 3D (after any dilation)')
    erode2d = traits.Int(
        argstr='--erode2d %d',
        desc='nerode2d: erode binarization in 2D (after any 3D erosion)')
class BinarizeOutputSpec(TraitedSpec):
    """Outputs of ``mri_binarize``."""
    binary_file = File(exists=True, desc='binarized output volume')
    count_file = File(desc='ascii file containing number of hits')
class Binarize(FSCommand):
    """Threshold or match-select a volume into a binary mask with
    ``mri_binarize``."""
    _cmd = 'mri_binarize'
    input_spec = BinarizeInputSpec
    output_spec = BinarizeOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        binfile = self.inputs.binary_file
        if not isdefined(binfile):
            # Derive the output name from the input; an explicit out_type
            # selects the extension, otherwise the input extension is kept.
            if isdefined(self.inputs.out_type):
                binfile = fname_presuffix(
                    self.inputs.in_file,
                    newpath=os.getcwd(),
                    suffix='_thresh.' + self.inputs.out_type,
                    use_ext=False)
            else:
                binfile = fname_presuffix(
                    self.inputs.in_file, newpath=os.getcwd(),
                    suffix='_thresh')
        outputs['binary_file'] = os.path.abspath(binfile)
        count = self.inputs.count_file
        if isdefined(count):
            if not isinstance(count, bool):
                outputs['count_file'] = count
            elif count:
                outputs['count_file'] = fname_presuffix(
                    self.inputs.in_file,
                    suffix='_count.txt',
                    newpath=os.getcwd(),
                    use_ext=False)
        return outputs

    def _format_arg(self, name, spec, value):
        if name == 'count_file':
            # A bare True means "use the auto-generated filename".
            fname = self._list_outputs()[name] if isinstance(value,
                                                             bool) else value
            return spec.argstr % fname
        if name == 'out_type':
            # The type is expressed via the output filename extension only.
            return ''
        return super(Binarize, self)._format_arg(name, spec, value)

    def _gen_filename(self, name):
        return self._list_outputs()[name] if name == 'binary_file' else None
class ConcatenateInputSpec(FSTraitedSpec):
    """Inputs for ``mri_concat`` (concatenate/reduce volumes)."""
    in_files = InputMultiPath(
        File(exists=True),
        desc='Individual volumes to be concatenated',
        argstr='--i %s...',
        mandatory=True)
    concatenated_file = File(
        desc='Output volume', argstr='--o %s', genfile=True)
    sign = traits.Enum(
        'abs',
        'pos',
        'neg',
        argstr='--%s',
        desc='Take only pos or neg voxles from input, or take abs')
    stats = traits.Enum(
        'sum',
        'var',
        'std',
        'max',
        'min',
        'mean',
        argstr='--%s',
        desc='Compute the sum, var, std, max, min or mean of the input volumes'
    )
    paired_stats = traits.Enum(
        'sum',
        'avg',
        'diff',
        'diff-norm',
        'diff-norm1',
        'diff-norm2',
        argstr='--paired-%s',
        desc='Compute paired sum, avg, or diff')
    gmean = traits.Int(
        argstr='--gmean %d',
        desc='create matrix to average Ng groups, Nper=Ntot/Ng')
    mean_div_n = traits.Bool(
        argstr='--mean-div-n', desc='compute mean/nframes (good for var)')
    multiply_by = traits.Float(
        argstr='--mul %f', desc='Multiply input volume by some amount')
    add_val = traits.Float(
        argstr='--add %f', desc='Add some amount to the input volume')
    multiply_matrix_file = File(
        exists=True,
        argstr='--mtx %s',
        desc='Multiply input by an ascii matrix in file')
    combine = traits.Bool(
        argstr='--combine',
        desc='Combine non-zero values into single frame volume')
    # NOTE(review): the desc below is missing its closing parenthesis.
    keep_dtype = traits.Bool(
        argstr='--keep-datatype',
        desc='Keep voxelwise precision type (default is float')
    max_bonfcor = traits.Bool(
        argstr='--max-bonfcor',
        desc='Compute max and bonferroni correct (assumes -log10(ps))')
    max_index = traits.Bool(
        argstr='--max-index',
        desc='Compute the index of max voxel in concatenated volumes')
    mask_file = File(
        exists=True, argstr='--mask %s', desc='Mask input with a volume')
    vote = traits.Bool(
        argstr='--vote',
        desc='Most frequent value at each voxel and fraction of occurances')
    sort = traits.Bool(
        argstr='--sort', desc='Sort each voxel by ascending frame value')
class ConcatenateOutputSpec(TraitedSpec):
    """Output of ``mri_concat``."""
    concatenated_file = File(
        exists=True, desc='Path/name of the output volume')
class Concatenate(FSCommand):
    """Concatenate or reduce volumes with ``mri_concat``."""
    _cmd = 'mri_concat'
    input_spec = ConcatenateInputSpec
    output_spec = ConcatenateOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        out_name = self.inputs.concatenated_file
        if not isdefined(out_name):
            # Fall back to a fixed default name in the working directory.
            out_name = 'concat_output.nii.gz'
        outputs['concatenated_file'] = os.path.join(os.getcwd(), out_name)
        return outputs

    def _gen_filename(self, name):
        if name != 'concatenated_file':
            return None
        return self._list_outputs()[name]
class SegStatsInputSpec(FSTraitedSpec):
    """Inputs for ``mri_segstats`` (per-segment statistics)."""
    _xor_inputs = ('segmentation_file', 'annot', 'surf_label')
    segmentation_file = File(
        exists=True,
        argstr='--seg %s',
        xor=_xor_inputs,
        mandatory=True,
        desc='segmentation volume path')
    annot = traits.Tuple(
        traits.Str,
        traits.Enum('lh', 'rh'),
        traits.Str,
        argstr='--annot %s %s %s',
        xor=_xor_inputs,
        mandatory=True,
        desc='subject hemi parc : use surface parcellation')
    surf_label = traits.Tuple(
        traits.Str,
        traits.Enum('lh', 'rh'),
        traits.Str,
        argstr='--slabel %s %s %s',
        xor=_xor_inputs,
        mandatory=True,
        desc='subject hemi label : use surface label')
    summary_file = File(
        argstr='--sum %s',
        genfile=True,
        position=-1,
        desc='Segmentation stats summary table file')
    partial_volume_file = File(
        exists=True, argstr='--pv %s', desc='Compensate for partial voluming')
    in_file = File(
        exists=True,
        argstr='--i %s',
        desc='Use the segmentation to report stats on this volume')
    frame = traits.Int(
        argstr='--frame %d', desc='Report stats on nth frame of input volume')
    multiply = traits.Float(argstr='--mul %f', desc='multiply input by val')
    calc_snr = traits.Bool(
        argstr='--snr', desc='save mean/std as extra column in output table')
    calc_power = traits.Enum(
        'sqr',
        'sqrt',
        argstr='--%s',
        desc='Compute either the sqr or the sqrt of the input')
    _ctab_inputs = ('color_table_file', 'default_color_table',
                    'gca_color_table')
    color_table_file = File(
        exists=True,
        argstr='--ctab %s',
        xor=_ctab_inputs,
        desc='color table file with seg id names')
    default_color_table = traits.Bool(
        argstr='--ctab-default',
        xor=_ctab_inputs,
        desc='use $FREESURFER_HOME/FreeSurferColorLUT.txt')
    gca_color_table = File(
        exists=True,
        argstr='--ctab-gca %s',
        xor=_ctab_inputs,
        desc='get color table from GCA (CMA)')
    segment_id = traits.List(
        argstr='--id %s...', desc='Manually specify segmentation ids')
    exclude_id = traits.Int(
        argstr='--excludeid %d', desc='Exclude seg id from report')
    exclude_ctx_gm_wm = traits.Bool(
        argstr='--excl-ctxgmwm', desc='exclude cortical gray and white matter')
    wm_vol_from_surf = traits.Bool(
        argstr='--surf-wm-vol', desc='Compute wm volume from surf')
    cortex_vol_from_surf = traits.Bool(
        argstr='--surf-ctx-vol', desc='Compute cortex volume from surf')
    non_empty_only = traits.Bool(
        argstr='--nonempty', desc='Only report nonempty segmentations')
    empty = traits.Bool(
        argstr="--empty",
        desc="Report on segmentations listed in the color table")
    mask_file = File(
        exists=True, argstr='--mask %s', desc='Mask volume (same size as seg')
    mask_thresh = traits.Float(
        argstr='--maskthresh %f',
        desc='binarize mask with this threshold <0.5>')
    # Bug fix: '--masksign %s' was passed positionally (making it a bogus
    # enum value) instead of as the argstr, so the flag was never emitted.
    mask_sign = traits.Enum(
        'abs',
        'pos',
        'neg',
        argstr='--masksign %s',
        desc='Sign for mask threshold: pos, neg, or abs')
    # Bug fix: '--maskframe %d' was passed positionally (an invalid Int
    # default) instead of as the argstr.
    mask_frame = traits.Int(
        argstr='--maskframe %d',
        requires=['mask_file'],
        desc='Mask with this (0 based) frame of the mask volume')
    mask_invert = traits.Bool(
        argstr='--maskinvert', desc='Invert binarized mask volume')
    mask_erode = traits.Int(
        argstr='--maskerode %d', desc='Erode mask by some amount')
    brain_vol = traits.Enum(
        'brain-vol-from-seg',
        'brainmask',
        argstr='--%s',
        desc=
        'Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg``'
    )
    brainmask_file = File(
        argstr="--brainmask %s",
        exists=True,
        desc=
        "Load brain mask and compute the volume of the brain as the non-zero voxels in this volume"
    )
    etiv = traits.Bool(
        argstr='--etiv', desc='Compute ICV from talairach transform')
    # Bug fix: '--%s-only' was passed positionally (a bogus enum value)
    # instead of as the argstr.
    etiv_only = traits.Enum(
        'etiv',
        'old-etiv',
        argstr='--%s-only',
        desc='Compute etiv and exit. Use ``etiv`` or ``old-etiv``')
    avgwf_txt_file = traits.Either(
        traits.Bool,
        File,
        argstr='--avgwf %s',
        desc='Save average waveform into file (bool or filename)')
    avgwf_file = traits.Either(
        traits.Bool,
        File,
        argstr='--avgwfvol %s',
        desc='Save as binary volume (bool or filename)')
    sf_avg_file = traits.Either(
        traits.Bool,
        File,
        argstr='--sfavg %s',
        desc='Save mean across space and time')
    vox = traits.List(
        traits.Int,
        argstr='--vox %s',
        desc='Replace seg with all 0s except at C R S (three int inputs)')
    supratent = traits.Bool(
        argstr="--supratent", desc="Undocumented input flag")
    subcort_gm = traits.Bool(
        argstr="--subcortgray",
        desc="Compute volume of subcortical gray matter")
    total_gray = traits.Bool(
        argstr="--totalgray", desc="Compute volume of total gray matter")
    euler = traits.Bool(
        argstr="--euler",
        desc=
        "Write out number of defect holes in orig.nofix based on the euler number"
    )
    # The argstr consumes the filename twice: once as the path and once
    # (basename, stripped of .mgz) as the intensity name; see _format_arg.
    in_intensity = File(
        argstr="--in %s --in-intensity-name %s",
        desc="Undocumented input norm.mgz file")
    intensity_units = traits.Enum(
        'MR',
        argstr="--in-intensity-units %s",
        requires=["in_intensity"],
        desc="Intensity units")
class SegStatsOutputSpec(TraitedSpec):
    """Outputs of ``mri_segstats``."""
    summary_file = File(
        exists=True, desc='Segmentation summary statistics table')
    avgwf_txt_file = File(
        desc='Text file with functional statistics averaged over segs')
    avgwf_file = File(
        desc='Volume with functional statistics averaged over segs')
    # Typo fix: desc previously read "framss".
    sf_avg_file = File(
        desc='Text file with func statistics averaged over segs and frames')
class SegStats(FSCommand):
    """Report per-segment statistics on a volume using ``mri_segstats``."""
    _cmd = 'mri_segstats'
    input_spec = SegStatsInputSpec
    output_spec = SegStatsOutputSpec
    def _list_outputs(self):
        outputs = self.output_spec().get()
        if isdefined(self.inputs.summary_file):
            outputs['summary_file'] = os.path.abspath(self.inputs.summary_file)
        else:
            # Default summary filename in the working directory.
            outputs['summary_file'] = os.path.join(os.getcwd(),
                                                   'summary.stats')
        # Suffixes for the optional waveform/average outputs, applied to a
        # source name derived from whichever seg/annot/label input is set.
        suffices = dict(
            avgwf_txt_file='_avgwf.txt',
            avgwf_file='_avgwf.nii.gz',
            sf_avg_file='sfavg.txt')
        if isdefined(self.inputs.segmentation_file):
            _, src = os.path.split(self.inputs.segmentation_file)
        if isdefined(self.inputs.annot):
            src = '_'.join(self.inputs.annot)
        if isdefined(self.inputs.surf_label):
            src = '_'.join(self.inputs.surf_label)
        for name, suffix in list(suffices.items()):
            value = getattr(self.inputs, name)
            if isdefined(value):
                # A bare True means "auto-generate the filename".
                if isinstance(value, bool):
                    outputs[name] = fname_presuffix(
                        src, suffix=suffix, newpath=os.getcwd(), use_ext=False)
                else:
                    outputs[name] = os.path.abspath(value)
        return outputs
    def _format_arg(self, name, spec, value):
        # Anchor relative output paths to the current directory first; the
        # second branch below may then substitute the generated filename.
        if name in ('summary_file', 'avgwf_txt_file'):
            if not isinstance(value, bool):
                if not os.path.isabs(value):
                    value = os.path.join('.', value)
        if name in ['avgwf_txt_file', 'avgwf_file', 'sf_avg_file']:
            if isinstance(value, bool):
                fname = self._list_outputs()[name]
            else:
                fname = value
            return spec.argstr % fname
        elif name == 'in_intensity':
            # argstr takes the path and the basename (without .mgz) as the
            # intensity name.
            intensity_name = os.path.basename(
                self.inputs.in_intensity).replace('.mgz', '')
            return spec.argstr % (value, intensity_name)
        return super(SegStats, self)._format_arg(name, spec, value)
    def _gen_filename(self, name):
        if name == 'summary_file':
            return self._list_outputs()[name]
        return None
class SegStatsReconAllInputSpec(SegStatsInputSpec):
    """SegStats inputs plus the implicit files a recon-all run provides."""
    # recon-all input requirements
    subject_id = traits.String(
        'subject_id',
        usedefault=True,
        argstr="--subject %s",
        mandatory=True,
        desc="Subject id being processed")
    # implicit
    ribbon = traits.File(
        mandatory=True, exists=True, desc="Input file mri/ribbon.mgz")
    presurf_seg = File(exists=True, desc="Input segmentation volume")
    transform = File(mandatory=True, exists=True, desc="Input transform file")
    lh_orig_nofix = File(
        mandatory=True, exists=True, desc="Input lh.orig.nofix")
    rh_orig_nofix = File(
        mandatory=True, exists=True, desc="Input rh.orig.nofix")
    lh_white = File(
        mandatory=True,
        exists=True,
        desc="Input file must be <subject_id>/surf/lh.white")
    rh_white = File(
        mandatory=True,
        exists=True,
        desc="Input file must be <subject_id>/surf/rh.white")
    lh_pial = File(
        mandatory=True,
        exists=True,
        desc="Input file must be <subject_id>/surf/lh.pial")
    rh_pial = File(
        mandatory=True,
        exists=True,
        desc="Input file must be <subject_id>/surf/rh.pial")
    aseg = File(exists=True, desc="Mandatory implicit input in 5.3")
    copy_inputs = traits.Bool(desc="If running as a node, set this to True " +
                              "otherwise, this will copy the implicit inputs "
                              + "to the node directory.")
class SegStatsReconAll(SegStats):
    """SegStats wired for use inside a recon-all style workflow: it can copy
    the implicit FreeSurfer files into a temporary subjects directory."""
    input_spec = SegStatsReconAllInputSpec
    output_spec = SegStatsOutputSpec
    def _format_arg(self, name, spec, value):
        # The brainmask is copied into the subject dir, so only pass its
        # basename on the command line.
        if name == 'brainmask_file':
            return spec.argstr % os.path.basename(value)
        return super(SegStatsReconAll, self)._format_arg(name, spec, value)
    def run(self, **inputs):
        # When copy_inputs is set, stage every implicit file into a
        # node-local subjects_dir before delegating to SegStats.run.
        if self.inputs.copy_inputs:
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            copy2subjdir(self, self.inputs.lh_orig_nofix, 'surf',
                         'lh.orig.nofix')
            copy2subjdir(self, self.inputs.rh_orig_nofix, 'surf',
                         'rh.orig.nofix')
            copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white')
            copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white')
            copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial')
            copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial')
            copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz')
            copy2subjdir(self, self.inputs.presurf_seg, 'mri',
                         'aseg.presurf.mgz')
            copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.mgz')
            copy2subjdir(self, self.inputs.transform,
                         os.path.join('mri', 'transforms'), 'talairach.xfm')
            copy2subjdir(self, self.inputs.in_intensity, 'mri')
            copy2subjdir(self, self.inputs.brainmask_file, 'mri')
        return super(SegStatsReconAll, self).run(**inputs)
class Label2VolInputSpec(FSTraitedSpec):
    """Inputs for ``mri_label2vol``; exactly one of label_file/annot_file/
    seg_file/aparc_aseg selects the source, and exactly one of
    reg_file/reg_header/identity selects the registration."""
    label_file = InputMultiPath(
        File(exists=True),
        argstr='--label %s...',
        xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
        copyfile=False,
        mandatory=True,
        desc='list of label files')
    annot_file = File(
        exists=True,
        argstr='--annot %s',
        xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
        requires=('subject_id', 'hemi'),
        mandatory=True,
        copyfile=False,
        desc='surface annotation file')
    seg_file = File(
        exists=True,
        argstr='--seg %s',
        xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
        mandatory=True,
        copyfile=False,
        desc='segmentation file')
    aparc_aseg = traits.Bool(
        argstr='--aparc+aseg',
        xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'),
        mandatory=True,
        desc='use aparc+aseg.mgz in subjectdir as seg')
    template_file = File(
        exists=True,
        argstr='--temp %s',
        mandatory=True,
        desc='output template volume')
    reg_file = File(
        exists=True,
        argstr='--reg %s',
        xor=('reg_file', 'reg_header', 'identity'),
        desc='tkregister style matrix VolXYZ = R*LabelXYZ')
    reg_header = File(
        exists=True,
        argstr='--regheader %s',
        xor=('reg_file', 'reg_header', 'identity'),
        desc='label template volume')
    identity = traits.Bool(
        argstr='--identity',
        xor=('reg_file', 'reg_header', 'identity'),
        desc='set R=I')
    invert_mtx = traits.Bool(
        argstr='--invertmtx', desc='Invert the registration matrix')
    fill_thresh = traits.Range(
        0., 1., argstr='--fillthresh %g', desc='thresh : between 0 and 1')
    label_voxel_volume = traits.Float(
        argstr='--labvoxvol %f', desc='volume of each label point (def 1mm3)')
    proj = traits.Tuple(
        traits.Enum('abs', 'frac'),
        traits.Float,
        traits.Float,
        traits.Float,
        argstr='--proj %s %f %f %f',
        requires=('subject_id', 'hemi'),
        desc='project along surface normal')
    subject_id = traits.Str(argstr='--subject %s', desc='subject id')
    hemi = traits.Enum(
        'lh', 'rh', argstr='--hemi %s', desc='hemisphere to use lh or rh')
    surface = traits.Str(
        argstr='--surf %s', desc='use surface instead of white')
    vol_label_file = File(argstr='--o %s', genfile=True, desc='output volume')
    label_hit_file = File(
        argstr='--hits %s', desc='file with each frame is nhits for a label')
    map_label_stat = File(
        argstr='--label-stat %s',
        desc='map the label stats field into the vol')
    native_vox2ras = traits.Bool(
        argstr='--native-vox2ras',
        desc='use native vox2ras xform instead of tkregister-style')
class Label2VolOutputSpec(TraitedSpec):
    """Output of ``mri_label2vol``."""
    vol_label_file = File(exists=True, desc='output volume')
class Label2Vol(FSCommand):
    """Create a binary volume from labels/annotation/segmentation using
    ``mri_label2vol``."""
    _cmd = 'mri_label2vol'
    input_spec = Label2VolInputSpec
    output_spec = Label2VolOutputSpec

    def _list_outputs(self):
        outputs = self.output_spec().get()
        out_name = self.inputs.vol_label_file
        if not isdefined(out_name):
            # Derive the source name from whichever input type was given;
            # the input traits are mutually exclusive.
            for attr in ('label_file', 'annot_file', 'seg_file'):
                candidate = getattr(self.inputs, attr)
                if isdefined(candidate):
                    if isinstance(candidate, list):
                        candidate = candidate[0]
                    src = os.path.split(candidate)[1]
            if isdefined(self.inputs.aparc_aseg):
                src = 'aparc+aseg.mgz'
            out_name = fname_presuffix(
                src, suffix='_vol.nii.gz', newpath=os.getcwd(), use_ext=False)
        outputs['vol_label_file'] = out_name
        return outputs

    def _gen_filename(self, name):
        return self._list_outputs()[name] if name == 'vol_label_file' else None
class MS_LDAInputSpec(FSTraitedSpec):
    """Inputs for ``mri_ms_LDA`` (LDA on multi-FLASH intensity space)."""
    lda_labels = traits.List(
        traits.Int(),
        argstr='-lda %s',
        mandatory=True,
        minlen=2,
        maxlen=2,
        sep=' ',
        desc='pair of class labels to optimize')
    weight_file = traits.File(
        argstr='-weight %s',
        mandatory=True,
        desc='filename for the LDA weights (input or output)')
    vol_synth_file = traits.File(
        exists=False,
        argstr='-synth %s',
        mandatory=True,
        desc=('filename for the synthesized output '
              'volume'))
    label_file = traits.File(
        exists=True, argstr='-label %s', desc='filename of the label volume')
    mask_file = traits.File(
        exists=True,
        argstr='-mask %s',
        desc='filename of the brain mask volume')
    shift = traits.Int(
        argstr='-shift %d',
        desc='shift all values equal to the given value to zero')
    conform = traits.Bool(
        argstr='-conform',
        desc=('Conform the input volumes (brain mask '
              'typically already conformed)'))
    use_weights = traits.Bool(
        argstr='-W',
        desc=('Use the weights from a previously '
              'generated weight file'))
    # FLASH images go last on the command line.
    images = InputMultiPath(
        File(exists=True),
        argstr='%s',
        mandatory=True,
        copyfile=False,
        desc='list of input FLASH images',
        position=-1)
class MS_LDAOutputSpec(TraitedSpec):
    """Outputs of ``mri_ms_LDA``: the weight file and synthesized volume."""
    weight_file = File(exists=True, desc='')
    vol_synth_file = File(exists=True, desc='')
class MS_LDA(FSCommand):
    """Perform LDA reduction on the intensity space of FLASH images with
    ``mri_ms_LDA``."""
    _cmd = 'mri_ms_LDA'
    input_spec = MS_LDAInputSpec
    output_spec = MS_LDAOutputSpec

    def _list_outputs(self):
        outputs = self._outputs().get()
        # Bug fix: the input spec declares no 'output_synth' trait, so the
        # previous unconditional attribute access could only ever fail.
        # Keep the legacy behavior for specs that do define it, otherwise
        # fall back to vol_synth_file.
        output_synth = getattr(self.inputs, 'output_synth', None)
        if output_synth is not None and isdefined(output_synth):
            outputs['vol_synth_file'] = os.path.abspath(output_synth)
        else:
            outputs['vol_synth_file'] = os.path.abspath(
                self.inputs.vol_synth_file)
        # The weight file is an output only when it is being (re)computed,
        # i.e. when use_weights is unset or False.
        if not isdefined(
                self.inputs.use_weights) or self.inputs.use_weights is False:
            outputs['weight_file'] = os.path.abspath(self.inputs.weight_file)
        return outputs

    def _verify_weights_file_exists(self):
        # Using precomputed weights requires the file to already exist.
        if not os.path.exists(os.path.abspath(self.inputs.weight_file)):
            raise traits.TraitError(
                "MS_LDA: use_weights must accompany an existing weights file")

    def _format_arg(self, name, spec, value):
        if name == 'use_weights':
            if self.inputs.use_weights is True:
                self._verify_weights_file_exists()
            else:
                return ''
                # TODO: Fix bug when boolean values are set explicitly to false
        return super(MS_LDA, self)._format_arg(name, spec, value)

    def _gen_filename(self, name):
        pass
class Label2LabelInputSpec(FSTraitedSpec):
    """Inputs for ``mri_label2label`` (map a label between subjects)."""
    hemisphere = traits.Enum(
        'lh',
        'rh',
        argstr="--hemi %s",
        mandatory=True,
        desc="Input hemisphere")
    subject_id = traits.String(
        'subject_id',
        usedefault=True,
        argstr="--trgsubject %s",
        mandatory=True,
        desc="Target subject")
    sphere_reg = File(
        mandatory=True,
        exists=True,
        desc="Implicit input <hemisphere>.sphere.reg")
    white = File(
        mandatory=True, exists=True, desc="Implicit input <hemisphere>.white")
    source_sphere_reg = File(
        mandatory=True,
        exists=True,
        desc="Implicit input <hemisphere>.sphere.reg")
    source_white = File(
        mandatory=True, exists=True, desc="Implicit input <hemisphere>.white")
    source_label = File(
        argstr="--srclabel %s",
        mandatory=True,
        exists=True,
        desc="Source label")
    source_subject = traits.String(
        argstr="--srcsubject %s", mandatory=True, desc="Source subject name")
    # optional
    out_file = File(
        argstr="--trglabel %s",
        name_source=['source_label'],
        name_template='%s_converted',
        hash_files=False,
        keep_extension=True,
        desc="Target label")
    registration_method = traits.Enum(
        'surface',
        'volume',
        usedefault=True,
        argstr="--regmethod %s",
        desc="Registration method")
    copy_inputs = traits.Bool(
        desc="If running as a node, set this to True." +
        "This will copy the input files to the node " + "directory.")
class Label2LabelOutputSpec(TraitedSpec):
    """Output of ``mri_label2label``."""
    out_file = File(exists=True, desc='Output label')
class Label2Label(FSCommand):
    """Convert a label from one subject to another with ``mri_label2label``,
    staging the implicit surface files when ``copy_inputs`` is set."""
    _cmd = 'mri_label2label'
    input_spec = Label2LabelInputSpec
    output_spec = Label2LabelOutputSpec
    def _list_outputs(self):
        outputs = self._outputs().get()
        # The command writes into the target subject's label directory.
        outputs['out_file'] = os.path.join(self.inputs.subjects_dir,
                                           self.inputs.subject_id, 'label',
                                           self.inputs.out_file)
        return outputs
    def run(self, **inputs):
        if self.inputs.copy_inputs:
            # Stage the implicit target/source surfaces into a node-local
            # subjects_dir before running the command.
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            hemi = self.inputs.hemisphere
            copy2subjdir(self, self.inputs.sphere_reg, 'surf',
                         '{0}.sphere.reg'.format(hemi))
            copy2subjdir(self, self.inputs.white, 'surf',
                         '{0}.white'.format(hemi))
            copy2subjdir(
                self,
                self.inputs.source_sphere_reg,
                'surf',
                '{0}.sphere.reg'.format(hemi),
                subject_id=self.inputs.source_subject)
            copy2subjdir(
                self,
                self.inputs.source_white,
                'surf',
                '{0}.white'.format(hemi),
                subject_id=self.inputs.source_subject)
        # label dir must exist in order for output file to be written
        label_dir = os.path.join(self.inputs.subjects_dir,
                                 self.inputs.subject_id, 'label')
        if not os.path.isdir(label_dir):
            os.makedirs(label_dir)
        return super(Label2Label, self).run(**inputs)
class Label2AnnotInputSpec(FSTraitedSpec):
    """Inputs for ``mris_label2annot`` (combine labels into an annotation)."""
    # required
    hemisphere = traits.Enum(
        'lh',
        'rh',
        argstr="--hemi %s",
        mandatory=True,
        desc="Input hemisphere")
    subject_id = traits.String(
        'subject_id',
        usedefault=True,
        argstr="--s %s",
        mandatory=True,
        desc="Subject name/ID")
    in_labels = traits.List(
        argstr="--l %s...", mandatory=True, desc="List of input label files")
    out_annot = traits.String(
        argstr="--a %s",
        mandatory=True,
        desc="Name of the annotation to create")
    orig = File(exists=True, mandatory=True, desc="implicit {hemisphere}.orig")
    # optional
    keep_max = traits.Bool(
        argstr="--maxstatwinner", desc="Keep label with highest 'stat' value")
    verbose_off = traits.Bool(
        argstr="--noverbose",
        desc="Turn off overlap and stat override messages")
    color_table = File(
        argstr="--ctab %s",
        exists=True,
        desc=
        "File that defines the structure names, their indices, and their color"
    )
    copy_inputs = traits.Bool(
        desc="copy implicit inputs and create a temp subjects_dir")
class Label2AnnotOutputSpec(TraitedSpec):
    """Output of ``mris_label2annot``."""
    out_file = File(exists=True, desc='Output annotation file')
class Label2Annot(FSCommand):
    """Combine label files into an annotation with ``mris_label2annot``."""
    _cmd = 'mris_label2annot'
    input_spec = Label2AnnotInputSpec
    output_spec = Label2AnnotOutputSpec
    def run(self, **inputs):
        if self.inputs.copy_inputs:
            # Stage the implicit orig surface into a node-local subjects_dir.
            self.inputs.subjects_dir = os.getcwd()
            if 'subjects_dir' in inputs:
                inputs['subjects_dir'] = self.inputs.subjects_dir
            copy2subjdir(
                self,
                self.inputs.orig,
                folder='surf',
                basename='{0}.orig'.format(self.inputs.hemisphere))
        # label dir must exist in order for output file to be written
        label_dir = os.path.join(self.inputs.subjects_dir,
                                 self.inputs.subject_id, 'label')
        if not os.path.isdir(label_dir):
            os.makedirs(label_dir)
        return super(Label2Annot, self).run(**inputs)
    def _list_outputs(self):
        outputs = self._outputs().get()
        # The annotation is written as <hemi>.<out_annot>.annot in the
        # subject's label directory.
        outputs["out_file"] = os.path.join(
            str(self.inputs.subjects_dir), str(self.inputs.subject_id),
            'label',
            str(self.inputs.hemisphere) + '.' + str(self.inputs.out_annot) +
            '.annot')
        return outputs
class SphericalAverageInputSpec(FSTraitedSpec):
    """Inputs for ``mris_spherical_average``; the positional arguments are
    ordered with negative positions so they trail all flagged options."""
    out_file = File(
        argstr="%s",
        genfile=True,
        exists=False,
        position=-1,
        desc="Output filename")
    # NOTE(review): exists=True together with genfile=True looks
    # contradictory for in_average — confirm intended trait metadata.
    in_average = traits.Directory(
        argstr="%s",
        exists=True,
        genfile=True,
        position=-2,
        desc="Average subject")
    in_surf = File(
        argstr="%s",
        mandatory=True,
        exists=True,
        position=-3,
        desc="Input surface file")
    hemisphere = traits.Enum(
        'lh',
        'rh',
        argstr="%s",
        mandatory=True,
        position=-4,
        desc="Input hemisphere")
    fname = traits.String(
        argstr="%s",
        mandatory=True,
        position=-5,
        desc="""Filename from the average subject directory.
        Example: to use rh.entorhinal.label as the input label
        filename, set fname to 'rh.entorhinal' and which to
        'label'. The program will then search for
        '{in_average}/label/rh.entorhinal.label'
        """)
    which = traits.Enum(
        'coords',
        'label',
        'vals',
        'curv',
        'area',
        argstr="%s",
        mandatory=True,
        position=-6,
        desc="No documentation")
    subject_id = traits.String(
        argstr="-o %s", mandatory=True, desc="Output subject id")
    # optional
    erode = traits.Int(argstr="-erode %d", desc="Undocumented")
    in_orig = File(
        argstr="-orig %s", exists=True, desc="Original surface filename")
    threshold = traits.Float(argstr="-t %.1f", desc="Undocumented")
class SphericalAverageOutputSpec(TraitedSpec):
    """Output of ``mris_spherical_average``."""
    out_file = File(exists=False, desc='Output label')
class SphericalAverage(FSCommand):
    """Combine surface measures across subjects with
    ``mris_spherical_average``."""
    _cmd = 'mris_spherical_average'
    input_spec = SphericalAverageInputSpec
    output_spec = SphericalAverageOutputSpec

    def _format_arg(self, name, spec, value):
        # Surfaces are passed without a hemisphere prefix.
        if name == 'in_orig' or name == 'in_surf':
            surf = os.path.basename(value)
            for item in ['lh.', 'rh.']:
                surf = surf.replace(item, '')
            return spec.argstr % surf
        return super(SphericalAverage, self)._format_arg(name, spec, value)

    def _gen_filename(self, name):
        if name == 'in_average':
            # Default to the hemisphere's EC_average subject.  (The previous
            # code also computed FREESURFER_HOME when the directory was
            # missing, but never used the result; that dead code is removed.)
            return str(self.inputs.hemisphere) + '.EC_average'
        elif name == 'out_file':
            return self._list_outputs()[name]
        else:
            return None

    def _list_outputs(self):
        outputs = self._outputs().get()
        if isdefined(self.inputs.out_file):
            outputs['out_file'] = os.path.abspath(self.inputs.out_file)
        else:
            # Default output goes into the subject's label directory with a
            # name derived from the average subject.
            out_dir = os.path.join(self.inputs.subjects_dir,
                                   self.inputs.subject_id, 'label')
            if isdefined(self.inputs.in_average):
                basename = os.path.basename(self.inputs.in_average)
                basename = basename.replace('_', '_exvivo_') + '.label'
            else:
                basename = str(
                    self.inputs.hemisphere) + '.EC_exvivo_average.label'
            outputs['out_file'] = os.path.join(out_dir, basename)
        return outputs
| true | true |
1c3973532340e2fde1f06a6cd7564ce5420b57a7 | 1,701 | py | Python | lenstronomy/GalKin/aperture.py | jiwoncpark/lenstronomy | c1d12580f8d8cf1d065d80568a58c0694e23945a | [
"MIT"
] | 1 | 2020-07-31T07:55:17.000Z | 2020-07-31T07:55:17.000Z | lenstronomy/GalKin/aperture.py | jiwoncpark/lenstronomy | c1d12580f8d8cf1d065d80568a58c0694e23945a | [
"MIT"
] | null | null | null | lenstronomy/GalKin/aperture.py | jiwoncpark/lenstronomy | c1d12580f8d8cf1d065d80568a58c0694e23945a | [
"MIT"
] | 2 | 2020-10-26T10:45:11.000Z | 2021-03-04T12:25:19.000Z | __author__ = 'sibirrer'
from lenstronomy.GalKin.aperture_types import Shell, Slit, IFUShells, Frame
"""
class that defines the aperture of the measurement (e.g. slit, integral field spectroscopy regions etc)
Available aperture types:
-------------------------
'slit': length, width, center_ra, center_dec, angle
'shell': r_in, r_out, center_ra, center_dec
"""
class Aperture(object):
    """
    defines mask(s) of spectra, can handle IFU and single slit/box type data.
    """
    def __init__(self, aperture_type, **kwargs_aperture):
        """
        :param aperture_type: string, one of 'slit', 'shell', 'IFU_shells',
            'frame'
        :param kwargs_aperture: keyword arguments reflecting the aperture type
            chosen. We refer to the specific class instances for documentation.
        :raises ValueError: if ``aperture_type`` is not supported
        """
        if aperture_type == 'slit':
            self._aperture = Slit(**kwargs_aperture)
        elif aperture_type == 'shell':
            self._aperture = Shell(**kwargs_aperture)
        elif aperture_type == 'IFU_shells':
            self._aperture = IFUShells(**kwargs_aperture)
        elif aperture_type == 'frame':
            self._aperture = Frame(**kwargs_aperture)
        else:
            # Bug fix: previous message omitted the supported 'frame' type.
            raise ValueError("aperture type %s not implemented! Available are "
                             "'slit', 'shell', 'IFU_shells', 'frame'. "
                             % aperture_type)
    def aperture_select(self, ra, dec):
        """
        :param ra: angular coordinate of photon/ray
        :param dec: angular coordinate of photon/ray
        :return: bool, True if photon/ray is within the slit, False otherwise, int of the segment of the IFU
        """
        return self._aperture.aperture_select(ra, dec)
    @property
    def num_segments(self):
        """Number of aperture segments (e.g. number of IFU shells)."""
        return self._aperture.num_segments
| 32.711538 | 127 | 0.650794 | __author__ = 'sibirrer'
from lenstronomy.GalKin.aperture_types import Shell, Slit, IFUShells, Frame
class Aperture(object):
    """Aperture mask(s) of spectra; handles IFU and single slit/box data."""
    def __init__(self, aperture_type, **kwargs_aperture):
        """Instantiate the aperture implementation for ``aperture_type``.

        :param aperture_type: string, one of 'slit', 'shell', 'IFU_shells',
            'frame'
        :param kwargs_aperture: keyword arguments forwarded to the selected
            aperture class
        """
        if aperture_type == 'slit':
            self._aperture = Slit(**kwargs_aperture)
        elif aperture_type == 'shell':
            self._aperture = Shell(**kwargs_aperture)
        elif aperture_type == 'IFU_shells':
            self._aperture = IFUShells(**kwargs_aperture)
        elif aperture_type == 'frame':
            self._aperture = Frame(**kwargs_aperture)
        else:
            # NOTE(review): the message omits the supported 'frame' type.
            raise ValueError("aperture type %s not implemented! Available are 'slit', 'shell', 'IFU_shells'. " % aperture_type)
    def aperture_select(self, ra, dec):
        """Return whether a photon/ray at (ra, dec) falls in the aperture.

        :param ra: angular coordinate of photon/ray
        :param dec: angular coordinate of photon/ray
        :return: bool (plus segment index for IFU-type apertures)
        """
        return self._aperture.aperture_select(ra, dec)
    @property
    def num_segments(self):
        """Number of aperture segments (e.g. IFU shells)."""
        return self._aperture.num_segments
| true | true |
1c3974a1211e3f805b943edfec236b31e43b98b3 | 176,984 | py | Python | mne_qt_browser/_pg_figure.py | cbrnr/mne-qt-browser | 8ed661d2317d0bfc4c25fdbcabcdf2ea581d2f1c | [
"BSD-3-Clause"
] | null | null | null | mne_qt_browser/_pg_figure.py | cbrnr/mne-qt-browser | 8ed661d2317d0bfc4c25fdbcabcdf2ea581d2f1c | [
"BSD-3-Clause"
] | null | null | null | mne_qt_browser/_pg_figure.py | cbrnr/mne-qt-browser | 8ed661d2317d0bfc4c25fdbcabcdf2ea581d2f1c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Base classes and functions for 2D browser backends."""
# Author: Martin Schulz <dev@earthman-music.de>
#
# License: BSD-3-Clause
import datetime
import functools
import gc
import math
import platform
import sys
from ast import literal_eval
from collections import OrderedDict
from contextlib import contextmanager
from copy import copy
from functools import partial
from os.path import getsize
import numpy as np
from PyQt5.QtCore import (QEvent, QThread, Qt, pyqtSignal, QRectF, QLineF,
QPoint, QSettings)
from PyQt5.QtGui import (QFont, QIcon, QPixmap, QTransform,
QMouseEvent, QImage, QPainter, QPainterPath)
from PyQt5.QtTest import QTest
from PyQt5.QtWidgets import (QAction, QColorDialog, QComboBox, QDialog,
QDockWidget, QDoubleSpinBox, QFormLayout,
QGridLayout, QHBoxLayout, QInputDialog,
QLabel, QMainWindow, QMessageBox,
QPushButton, QScrollBar, QToolTip, QWidget,
QStyleOptionSlider, QStyle,
QApplication, QGraphicsView, QProgressBar,
QVBoxLayout, QLineEdit, QCheckBox, QScrollArea,
QGraphicsLineItem, QGraphicsScene, QTextEdit,
QSizePolicy, QSpinBox, QDesktopWidget, QSlider)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.colors import to_rgba_array
from pyqtgraph import (AxisItem, GraphicsView, InfLineLabel, InfiniteLine,
LinearRegionItem, PlotCurveItem, PlotItem,
Point, TextItem, ViewBox, mkBrush,
mkPen, setConfigOption, mkColor)
from scipy.stats import zscore
from mne.viz import plot_sensors
from mne.viz._figure import BrowserBase
from mne.viz.utils import _simplify_float, _merge_annotations, _figure_agg
from mne.annotations import _sync_onset
from mne.io.pick import (_DATA_CH_TYPES_ORDER_DEFAULT,
channel_indices_by_type, _DATA_CH_TYPES_SPLIT)
from mne.utils import _to_rgb, logger, sizeof_fmt, warn, get_config
from . import _browser_instances
from .icons import resources # noqa: F401
try:
    from pytestqt.exceptions import capture_exceptions
except ImportError:
    logger.debug('If pytest-qt is not installed, the errors from inside '
                 'the Qt-loop will be occluded and it will be harder '
                 'to trace back the cause.')
    # Fallback no-op so callers can always write
    # ``with capture_exceptions() as exceptions:``.
    @contextmanager
    def capture_exceptions():
        """Dummy replacement yielding an (always empty) exception list."""
        yield []
name = 'pyqtgraph'  # backend name reported to MNE-Python
# This can be removed when mne==1.0 is released.
try:
    from mne.viz.backends._utils import _init_mne_qtapp
except ImportError:
    from mne.viz.backends._utils import _init_qt_resources
    def _init_mne_qtapp(enable_icon=True, pg_app=False):
        """Get QApplication-instance for MNE-Python.

        Parameters
        ----------
        enable_icon : bool
            If True, set an MNE-icon for the app.
        pg_app : bool
            If True, create the QApplication with pyqtgraph. For an until now
            undiscovered reason the pyqtgraph-browser won't show without
            mkQApp from pyqtgraph.

        Returns
        -------
        app : ``PyQt5.QtWidgets.QApplication``
            Instance of QApplication.
        """
        from PyQt5.QtWidgets import QApplication
        from PyQt5.QtGui import QIcon
        app_name = 'MNE-Python'
        organization_name = 'MNE'
        # Fix from cbrnr/mnelab for app name in menu bar
        if sys.platform.startswith("darwin"):
            try:
                # set bundle name on macOS (app name shown in the menu bar)
                from Foundation import NSBundle
                bundle = NSBundle.mainBundle()
                info = (bundle.localizedInfoDictionary()
                        or bundle.infoDictionary())
                info["CFBundleName"] = app_name
            except ModuleNotFoundError:
                pass
        if pg_app:
            from pyqtgraph import mkQApp
            app = mkQApp(app_name)
        else:
            app = (QApplication.instance()
                   or QApplication(sys.argv or [app_name]))
        app.setApplicationName(app_name)
        app.setOrganizationName(organization_name)
        if enable_icon:
            # Set icon
            _init_qt_resources()
            kind = 'bigsur-' if platform.mac_ver()[0] >= '10.16' else ''
            app.setWindowIcon(QIcon(f":/mne-{kind}icon.png"))
        return app
def _get_color(color_spec):
    """Translate any matplotlib color-specifier into a pyqtgraph QColor."""
    # First let matplotlib resolve color-names etc. to an RGBA float tuple;
    # specs it does not understand are passed through unchanged.
    try:
        color_spec = _to_rgb(color_spec, alpha=True)
    except ValueError:
        pass
    # pyqtgraph expects integer channel values, so rescale 0-1 floats
    # to the 0-255 range.
    if isinstance(color_spec, tuple) and all(chan <= 1 for chan in color_spec):
        color_spec = tuple(int(chan * 255) for chan in color_spec)
    try:
        return mkColor(color_spec)
    except ValueError:
        raise ValueError(f'"{color_spec}" is not a valid matplotlib '
                         f'color-specifier!') from None
def propagate_to_children(method):
    """Forward a DataTrace-method call from a parent trace to its children.

    In epochs-mode a parent trace owns color-specific child traces; the
    decorated method runs on the parent first and is then repeated on every
    child. Passing ``propagate=False`` restricts the call to the object
    itself.
    """
    @functools.wraps(method)
    def wrapped(*args, **kwargs):
        do_propagate = kwargs.pop('propagate', True)
        # The parent always runs first.
        return_value = method(*args, **kwargs)
        owner = args[0]
        if (owner.mne.is_epochs and do_propagate
                and hasattr(owner, 'child_traces')):
            # Only parent traces carry a 'child_traces' list.
            for child in owner.child_traces:
                getattr(child, method.__name__)(*args[1:], **kwargs)
        return return_value
    return wrapped
class DataTrace(PlotCurveItem):
    """Graphics-Object for single data trace."""
    def __init__(self, main, ch_idx, child_idx=None, parent_trace=None):
        """Initialize a trace for channel ``ch_idx``.

        When ``parent_trace`` is given, this trace is a color-specific
        child of that trace (epochs-mode only).
        """
        super().__init__()
        self.main = main
        self.mne = main.mne
        # Set clickable with small area around trace to make clicking easier.
        self.setClickable(True, 12)
        # Set default z-value to 1 to be before other items in scene
        self.setZValue(1)
        # General attributes
        # The ch_idx is the index of the channel represented by this trace
        # in the channel-order from the unchanged instance (which also picks
        # refer to).
        self.ch_idx = None
        # The range_idx is the index of the channel represented by this trace
        # in the shown range.
        self.range_idx = None
        # The order_idx is the index of the channel represented by this trace
        # in the channel-order (defined e.g. by group_by).
        self.order_idx = None
        # Name of the channel the trace represents.
        self.ch_name = None
        # Indicates if trace is bad.
        self.isbad = None
        # Channel-type of trace.
        self.ch_type = None
        # Color-specifier (all possible matplotlib color formats)
        self.color = None
        # Attributes for epochs-mode
        # Index of child if child.
        self.child_idx = child_idx
        # Reference to parent if child.
        self.parent_trace = parent_trace
        # Only for parent traces
        if self.parent_trace is None:
            # Add to main trace list
            self.mne.traces.append(self)
            # References to children
            self.child_traces = list()
            # Colors of trace in viewrange
            self.trace_colors = None
        # set attributes
        self.set_ch_idx(ch_idx)
        self.update_color()
        self.update_scale()
        # Avoid calling self.update_data() twice on initialization
        # (because of update_scale()).
        if self.mne.clipping is None:
            self.update_data()
        # Add to main plot
        self.mne.plt.addItem(self)
    @propagate_to_children
    def remove(self):
        """Remove trace (and, via the decorator, its children) from plot."""
        self.mne.plt.removeItem(self)
        # Only for parent trace
        if self.parent_trace is None:
            self.mne.traces.remove(self)
        self.deleteLater()
    @propagate_to_children
    def update_color(self):
        """Update the color of the trace."""
        # Epochs
        if self.mne.is_epochs:
            # Add child traces if shown trace needs to have multiple colors
            # (PlotCurveItem only supports one color per object).
            # There are always as many color-specific traces added depending
            # on the whole time range of the instance regardless of the
            # currently visible time range (to avoid checking for new colors
            # while scrolling horizontally).
            # Only for parent trace
            if hasattr(self, 'child_traces'):
                self.trace_colors = np.unique(
                    self.mne.epoch_color_ref[self.ch_idx], axis=0)
                n_childs = len(self.child_traces)
                trace_diff = len(self.trace_colors) - n_childs - 1
                # Add child traces if necessary
                if trace_diff > 0:
                    for cix in range(n_childs, n_childs + trace_diff):
                        child = DataTrace(self.main, self.ch_idx,
                                          child_idx=cix, parent_trace=self)
                        self.child_traces.append(child)
                elif trace_diff < 0:
                    for _ in range(abs(trace_diff)):
                        rm_trace = self.child_traces.pop()
                        rm_trace.remove()
                # Set parent color
                self.color = self.trace_colors[0]
            # Only for child trace
            else:
                self.color = self.parent_trace.trace_colors[
                    self.child_idx + 1]
        # Raw/ICA
        else:
            if self.isbad:
                self.color = self.mne.ch_color_bad
            else:
                self.color = self.mne.ch_color_ref[self.ch_name]
        self.setPen(_get_color(self.color))
    @propagate_to_children
    def update_range_idx(self):
        """Should be updated when view-range or ch_idx changes."""
        self.range_idx = np.argwhere(self.mne.picks == self.ch_idx)[0][0]
    @propagate_to_children
    def update_ypos(self):
        """Should be updated when butterfly is toggled or ch_idx changes."""
        if self.mne.butterfly and self.mne.fig_selection is not None:
            self.ypos = self.mne.selection_ypos_dict[self.ch_idx]
        elif self.mne.fig_selection is not None and \
                self.mne.old_selection == 'Custom':
            self.ypos = self.range_idx + 1
        elif self.mne.butterfly:
            self.ypos = self.mne.butterfly_type_order.index(self.ch_type) + 1
        else:
            self.ypos = self.range_idx + self.mne.ch_start + 1
    @propagate_to_children
    def update_scale(self):
        """Apply the current global scale-factor to the trace."""
        transform = QTransform()
        transform.scale(1., self.mne.scale_factor)
        self.setTransform(transform)
        # With clipping, data depends on the scale and must be refetched.
        if self.mne.clipping is not None:
            self.update_data(propagate=False)
    @propagate_to_children
    def set_ch_idx(self, ch_idx):
        """Sets the channel index and all deriving indices."""
        # The ch_idx is the index of the channel represented by this trace
        # in the channel-order from the unchanged instance (which also picks
        # refer to).
        self.ch_idx = ch_idx
        # The range_idx is the index of the channel represented by this trace
        # in the shown range.
        self.update_range_idx(propagate=False)
        # The order_idx is the index of the channel represented by this trace
        # in the channel-order (defined e.g. by group_by).
        self.order_idx = np.argwhere(self.mne.ch_order == self.ch_idx)[0][0]
        self.ch_name = self.mne.inst.ch_names[ch_idx]
        self.isbad = self.ch_name in self.mne.info['bads']
        self.ch_type = self.mne.ch_types[ch_idx]
        self.update_ypos(propagate=False)
    @propagate_to_children
    def update_data(self):
        """Update data (fetch data from self.mne according to self.ch_idx)."""
        if self.mne.is_epochs or (self.mne.clipping is not None and
                                  self.mne.clipping != 'clamp'):
            connect = 'finite'
            skip = False
        else:
            connect = 'all'
            skip = True
        if self.mne.data_precomputed:
            data = self.mne.data[self.order_idx]
        else:
            data = self.mne.data[self.range_idx]
        # Get decim-specific time if enabled
        if self.mne.decim != 1:
            times = self.mne.decim_times[self.mne.decim_data[self.range_idx]]
            data = data[..., ::self.mne.decim_data[self.range_idx]]
        else:
            times = self.mne.times
        # For multiple color traces with epochs
        # replace values from other colors with NaN.
        if self.mne.is_epochs:
            data = np.copy(data)
            check_color = self.mne.epoch_color_ref[self.ch_idx,
                                                   self.mne.epoch_idx]
            bool_ixs = np.invert(np.equal(self.color, check_color).all(axis=1))
            starts = self.mne.boundary_times[self.mne.epoch_idx][bool_ixs]
            stops = self.mne.boundary_times[self.mne.epoch_idx + 1][bool_ixs]
            for start, stop in zip(starts, stops):
                data[np.logical_and(start <= times, times <= stop)] = np.nan
        self.setData(times, data, connect=connect, skipFiniteCheck=skip,
                     antialias=self.mne.antialiasing)
        self.setPos(0, self.ypos)
    def toggle_bad(self, x=None):
        """Toggle bad status."""
        # Toggle bad epoch
        if self.mne.is_epochs and x is not None:
            epoch_idx, color = self.main._toggle_bad_epoch(x)
            # Update epoch color
            if color != 'none':
                new_epo_color = np.repeat(to_rgba_array(color),
                                          len(self.mne.inst.ch_names), axis=0)
            elif self.mne.epoch_colors is None:
                new_epo_color = np.concatenate(
                    [to_rgba_array(c) for c
                     in self.mne.ch_color_ref.values()])
            else:
                new_epo_color = \
                    np.concatenate([to_rgba_array(c) for c in
                                    self.mne.epoch_colors[epoch_idx]])
            # Update bad channel colors
            bad_idxs = np.in1d(self.mne.ch_names, self.mne.info['bads'])
            new_epo_color[bad_idxs] = to_rgba_array(self.mne.ch_color_bad)
            self.mne.epoch_color_ref[:, epoch_idx] = new_epo_color
            # Update overview-bar
            self.mne.overview_bar.update_bad_epochs()
            # Update other traces including self
            for trace in self.mne.traces:
                trace.update_color()
                # Update data is necessary because colored segments will vary
                trace.update_data()
        # Toggle bad channel
        else:
            bad_color, pick, marked_bad = self.main._toggle_bad_channel(
                self.range_idx)
            # Update line color status
            self.isbad = not self.isbad
            # Update colors for epochs
            if self.mne.is_epochs:
                if marked_bad:
                    new_ch_color = np.repeat(to_rgba_array(bad_color),
                                             len(self.mne.inst), axis=0)
                elif self.mne.epoch_colors is None:
                    ch_color = self.mne.ch_color_ref[self.ch_name]
                    new_ch_color = np.repeat(to_rgba_array(ch_color),
                                             len(self.mne.inst), axis=0)
                else:
                    new_ch_color = np.concatenate([to_rgba_array(c[pick]) for
                                                   c in self.mne.epoch_colors])
                self.mne.epoch_color_ref[pick, :] = new_ch_color
            # Update trace color
            self.update_color()
            if self.mne.is_epochs:
                self.update_data()
            # Update channel-axis
            self.main._update_yaxis_labels()
            # Update overview-bar
            self.mne.overview_bar.update_bad_channels()
            # Update sensor color (if in selection mode)
            if self.mne.fig_selection is not None:
                self.mne.fig_selection._update_bad_sensors(pick, marked_bad)
    def mouseClickEvent(self, ev):
        """Customize mouse click events."""
        if (not self.clickable or ev.button() != Qt.MouseButton.LeftButton
                or self.mne.annotation_mode):
            # Explicitly ignore events in annotation-mode
            ev.ignore()
            return
        if self.mouseShape().contains(ev.pos()):
            ev.accept()
            self.toggle_bad(ev.pos().x())
    def get_xdata(self):
        """Get xdata for testing."""
        return self.xData
    def get_ydata(self):
        """Get ydata for testing."""
        return self.yData + self.ypos
class TimeAxis(AxisItem):
    """The X-Axis displaying the time."""
    def __init__(self, mne):
        self.mne = mne
        # Cached tick spacing from tickValues(), reused by tickStrings().
        self._spacing = None
        super().__init__(orientation='bottom')
    def tickValues(self, minVal, maxVal, size):
        """Customize creation of axis values from visible axis range."""
        if self.mne.is_epochs:
            # In epochs-mode put one tick at the midpoint of each epoch.
            value_idxs = np.searchsorted(self.mne.midpoints, [minVal, maxVal])
            values = self.mne.midpoints[slice(*value_idxs)]
            spacing = len(self.mne.inst.times) / self.mne.info['sfreq']
            tick_values = [(spacing, values)]
            return tick_values
        else:
            # Save _spacing for later use
            self._spacing = self.tickSpacing(minVal, maxVal, size)
            return super().tickValues(minVal, maxVal, size)
    def tickStrings(self, values, scale, spacing):
        """Customize strings of axis values."""
        if self.mne.is_epochs:
            # Label each epoch with its number from the instance selection.
            epoch_nums = self.mne.inst.selection
            ts = epoch_nums[np.searchsorted(self.mne.midpoints, values)]
            tick_strings = [str(v) for v in ts]
        elif self.mne.time_format == 'clock':
            # Format ticks as wall-clock times relative to meas_date.
            meas_date = self.mne.info['meas_date']
            first_time = datetime.timedelta(seconds=self.mne.inst.first_time)
            digits = np.ceil(-np.log10(min(v[0] for v in self._spacing)
                                       ) + 1).astype(int)
            tick_strings = list()
            for val in values:
                val_time = datetime.timedelta(seconds=val) + \
                    first_time + meas_date
                val_str = val_time.strftime('%H:%M:%S')
                if int(val_time.microsecond):
                    # Append fractional seconds without the leading '0'.
                    val_str += \
                        f'{round(val_time.microsecond * 1e-6, digits)}'[1:]
                tick_strings.append(val_str)
        else:
            tick_strings = super().tickStrings(values, scale, spacing)
        return tick_strings
    def repaint(self):
        """Repaint Time Axis."""
        self.picture = None
        self.update()
    def get_labels(self):
        """Get labels for testing."""
        values = self.tickValues(*self.mne.viewbox.viewRange()[0],
                                 self.mne.xmax)
        labels = list()
        for spacing, vals in values:
            labels += self.tickStrings(vals, 1, spacing)
        return labels
class ChannelAxis(AxisItem):
    """The Y-Axis displaying the channel-names."""
    def __init__(self, main):
        self.main = main
        self.mne = main.mne
        # Maps channel-name -> ((x0, x1), (y0, y1)) of its drawn label,
        # used by mouseClickEvent to hit-test labels.
        self.ch_texts = OrderedDict()
        super().__init__(orientation='left')
        self.style['autoReduceTextSpace'] = False
    def tickValues(self, minVal, maxVal, size):
        """Customize creation of axis values from visible axis range."""
        minVal, maxVal = sorted((minVal, maxVal))
        values = list(range(round(minVal) + 1, round(maxVal)))
        tick_values = [(1, values)]
        return tick_values
    def tickStrings(self, values, scale, spacing):
        """Customize strings of axis values."""
        # Get channel-names
        if self.mne.butterfly and self.mne.fig_selection is not None:
            tick_strings = list(self.main._make_butterfly_selections_dict())
        elif self.mne.butterfly:
            _, ixs, _ = np.intersect1d(_DATA_CH_TYPES_ORDER_DEFAULT,
                                       self.mne.ch_types, return_indices=True)
            ixs.sort()
            tick_strings = np.array(_DATA_CH_TYPES_ORDER_DEFAULT)[ixs]
        else:
            # Get channel-names by subtracting 1 from the tick-values,
            # since the first channel starts at y=1.
            tick_strings = self.mne.ch_names[
                self.mne.ch_order[[v - 1 for v in values]]]
        return tick_strings
    def drawPicture(self, p, axisSpec, tickSpecs, textSpecs):
        """Customize drawing of axis items."""
        super().drawPicture(p, axisSpec, tickSpecs, textSpecs)
        for rect, flags, text in textSpecs:
            if self.mne.butterfly and self.mne.fig_selection is not None:
                p.setPen(_get_color('black'))
            elif self.mne.butterfly:
                p.setPen(_get_color(self.mne.ch_color_dict[text]))
            elif text in self.mne.info['bads']:
                p.setPen(_get_color(self.mne.ch_color_bad))
            else:
                p.setPen(_get_color(self.mne.ch_color_ref[text]))
            # Remember label geometry for click hit-testing.
            self.ch_texts[text] = ((rect.left(), rect.left() + rect.width()),
                                   (rect.top(), rect.top() + rect.height()))
            p.drawText(rect, int(flags), text)
    def repaint(self):
        """Repaint Channel Axis."""
        self.picture = None
        self.update()
    def mouseClickEvent(self, event):
        """Customize mouse click events."""
        # Clean up channel-texts
        if not self.mne.butterfly:
            self.ch_texts = {k: v for k, v in self.ch_texts.items()
                             if k in [tr.ch_name for tr in self.mne.traces]}
        # Get channel-name from position of channel-description
        ypos = event.scenePos().y()
        y_values = np.asarray(list(self.ch_texts.values()))[:, 1, :]
        y_diff = np.abs(y_values - ypos)
        ch_idx = int(np.argmin(y_diff, axis=0)[0])
        ch_name = list(self.ch_texts.keys())[ch_idx]
        trace = [tr for tr in self.mne.traces
                 if tr.ch_name == ch_name][0]
        # Left click toggles bad, right click opens the context figure.
        if event.button() == Qt.LeftButton:
            trace.toggle_bad()
        elif event.button() == Qt.RightButton:
            self.main._create_ch_context_fig(trace.range_idx)
    def get_labels(self):
        """Get labels for testing."""
        values = self.tickValues(*self.mne.viewbox.viewRange()[1], None)
        labels = self.tickStrings(values[0][1], None, None)
        return labels
class BaseScrollBar(QScrollBar):
    """Base Class for scrolling directly to the clicked position."""
    def __init__(self, parent=None):
        super().__init__(parent)
    def mousePressEvent(self, event):
        """Customize mouse click events.

        Clicking into the scrollbar groove jumps the slider directly to
        the clicked position instead of paging towards it.
        Taken from: https://stackoverflow.com/questions/29710327/
        how-to-override-qscrollbar-onclick-default-behaviour
        """
        if event.button() == Qt.LeftButton:
            opt = QStyleOptionSlider()
            self.initStyleOption(opt)
            control = self.style().hitTestComplexControl(
                QStyle.CC_ScrollBar, opt,
                event.pos(), self)
            if (control == QStyle.SC_ScrollBarAddPage or
                    control == QStyle.SC_ScrollBarSubPage):
                # scroll here
                gr = self.style().subControlRect(QStyle.CC_ScrollBar,
                                                 opt,
                                                 QStyle.SC_ScrollBarGroove,
                                                 self)
                sr = self.style().subControlRect(QStyle.CC_ScrollBar,
                                                 opt,
                                                 QStyle.SC_ScrollBarSlider,
                                                 self)
                if self.orientation() == Qt.Horizontal:
                    pos = event.pos().x()
                    sliderLength = sr.width()
                    sliderMin = gr.x()
                    sliderMax = gr.right() - sliderLength + 1
                    if (self.layoutDirection() == Qt.RightToLeft):
                        opt.upsideDown = not opt.upsideDown
                else:
                    pos = event.pos().y()
                    sliderLength = sr.height()
                    sliderMin = gr.y()
                    sliderMax = gr.bottom() - sliderLength + 1
                # Map the pixel position onto the scrollbar's value range.
                self.setValue(QStyle.sliderValueFromPosition(
                    self.minimum(), self.maximum(),
                    pos - sliderMin, sliderMax - sliderMin,
                    opt.upsideDown))
                return
        return super().mousePressEvent(event)
class TimeScrollBar(BaseScrollBar):
    """Scrolls through time."""
    def __init__(self, mne):
        super().__init__(Qt.Horizontal)
        self.mne = mne
        self.step_factor = 1
        self.setMinimum(0)
        self.setSingleStep(1)
        self.update_duration()
        self.setFocusPolicy(Qt.WheelFocus)
        # Because valueChanged is needed (captures every input to scrollbar,
        # not just sliderMoved), there has to be made a differentiation
        # between internal and external changes.
        self.external_change = False
        self.valueChanged.connect(self._time_changed)
    def _time_changed(self, value):
        """Set the plot's X-range after an internal scrollbar change."""
        if not self.external_change:
            if self.mne.is_epochs:
                # Convert Epoch index to time
                value = self.mne.boundary_times[int(value)]
            else:
                value /= self.step_factor
            self.mne.plt.setXRange(value, value + self.mne.duration,
                                   padding=0)
    def update_value(self, value):
        """Update value of the ScrollBar."""
        # Mark change as external to avoid setting
        # XRange again in _time_changed.
        self.external_change = True
        if self.mne.is_epochs:
            set_value = np.searchsorted(self.mne.midpoints, value)
        else:
            set_value = int(value * self.step_factor)
        self.setValue(set_value)
        self.external_change = False
    def update_duration(self):
        """Update bar size."""
        if self.mne.is_epochs:
            self.setPageStep(self.mne.n_epochs)
            self.setMaximum(len(self.mne.inst) - self.mne.n_epochs)
        else:
            self.setPageStep(int(self.mne.duration))
            self.step_factor = self.mne.scroll_sensitivity / self.mne.duration
            self.setMaximum(int((self.mne.xmax - self.mne.duration)
                                * self.step_factor))
    def _update_scroll_sensitivity(self):
        """Re-derive step_factor/maximum and keep the current position."""
        self.update_duration()
        self.update_value(self.value() / self.step_factor)
    def keyPressEvent(self, event):
        """Customize key press events."""
        # Let main handle the keypress
        event.ignore()
class ChannelScrollBar(BaseScrollBar):
    """Scrolls through channels."""
    def __init__(self, mne):
        super().__init__(Qt.Vertical)
        self.mne = mne
        self.setMinimum(0)
        self.setSingleStep(1)
        self.update_nchan()
        self.setFocusPolicy(Qt.WheelFocus)
        # Because valueChanged is needed (captures every input to scrollbar,
        # not just sliderMoved), there has to be made a differentiation
        # between internal and external changes.
        self.external_change = False
        self.valueChanged.connect(self._channel_changed)
    def _channel_changed(self, value):
        """Update shown channels/selection after an internal change."""
        if not self.external_change:
            if self.mne.fig_selection:
                # In selection-mode the bar scrolls through selections.
                label = list(self.mne.ch_selections.keys())[value]
                self.mne.fig_selection._chkbx_changed(label)
            elif not self.mne.butterfly:
                value = min(value, self.mne.ymax - self.mne.n_channels)
                self.mne.plt.setYRange(value, value + self.mne.n_channels + 1,
                                       padding=0)
    def update_value(self, value):
        """Update value of the ScrollBar."""
        # Mark change as external to avoid setting YRange again in
        # _channel_changed.
        self.external_change = True
        self.setValue(value)
        self.external_change = False
    def update_nchan(self):
        """Update bar size."""
        if getattr(self.mne, 'group_by', None) in ['position', 'selection']:
            # One step per selection group.
            self.setPageStep(1)
            self.setMaximum(len(self.mne.ch_selections) - 1)
        else:
            self.setPageStep(self.mne.n_channels)
            self.setMaximum(self.mne.ymax - self.mne.n_channels - 1)
    def keyPressEvent(self, event):
        """Customize key press events."""
        # Let main handle the keypress
        event.ignore()
class OverviewBar(QGraphicsView):
"""
Provides overview over channels and current visible range.
Has different modes:
- channels: Display channel-types
- zscore: Display channel-wise zscore across time
"""
def __init__(self, main):
super().__init__(QGraphicsScene())
self.main = main
self.mne = main.mne
self.bg_img = None
self.bg_pxmp = None
self.bg_pxmp_item = None
# Set minimum Size to 1/10 of display size
min_h = int(QApplication.desktop().screenGeometry().height() / 10)
self.setMinimumSize(1, 1)
self.setFixedHeight(min_h)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.set_background()
# Initialize Graphics-Items
# Bad channels
self.bad_line_dict = dict()
self.update_bad_channels()
# Events
self.event_line_dict = dict()
self.update_events()
if self.mne.is_epochs:
# Epochs Lines
self.epoch_line_dict = dict()
self.update_epoch_lines()
self.bad_epoch_rect_dict = dict()
self.update_bad_epochs()
else:
# Annotations
self.annotations_rect_dict = dict()
self.update_annotations()
# VLine
self.v_line = None
self.update_vline()
# View Range
self.viewrange_rect = None
self.update_viewrange()
def update_epoch_lines(self):
"""Update representation of epoch lines."""
epoch_line_pen = mkPen(color='k', width=1)
for t in self.mne.boundary_times[1:-1]:
top_left = self._mapFromData(t, 0)
bottom_right = self._mapFromData(t, len(self.mne.ch_order))
line = self.scene().addLine(QLineF(top_left, bottom_right),
epoch_line_pen)
line.setZValue(1)
self.epoch_line_dict[t] = line
def update_bad_channels(self):
"""Update representation of bad channels."""
bad_set = set(self.mne.info['bads'])
line_set = set(self.bad_line_dict.keys())
add_chs = bad_set.difference(line_set)
rm_chs = line_set.difference(bad_set)
for line_idx, ch_idx in enumerate(self.mne.ch_order):
ch_name = self.mne.ch_names[ch_idx]
if ch_name in add_chs:
start = self._mapFromData(0, line_idx)
stop = self._mapFromData(self.mne.inst.times[-1], line_idx)
pen = _get_color(self.mne.ch_color_bad)
line = self.scene().addLine(QLineF(start, stop), pen)
line.setZValue(2)
self.bad_line_dict[ch_name] = line
elif ch_name in rm_chs:
self.scene().removeItem(self.bad_line_dict[ch_name])
self.bad_line_dict.pop(ch_name)
def update_bad_epochs(self):
bad_set = set(self.mne.bad_epochs)
rect_set = set(self.bad_epoch_rect_dict.keys())
add_epos = bad_set.difference(rect_set)
rm_epos = rect_set.difference(bad_set)
for epo_num in self.mne.inst.selection:
if epo_num in add_epos:
epo_idx = self.mne.inst.selection.tolist().index(epo_num)
start, stop = self.mne.boundary_times[epo_idx:epo_idx + 2]
top_left = self._mapFromData(start, 0)
bottom_right = self._mapFromData(stop, len(self.mne.ch_order))
pen = _get_color(self.mne.epoch_color_bad)
rect = self.scene().addRect(QRectF(top_left, bottom_right),
pen=pen, brush=pen)
rect.setZValue(3)
self.bad_epoch_rect_dict[epo_num] = rect
elif epo_num in rm_epos:
self.scene().removeItem(self.bad_epoch_rect_dict[epo_num])
self.bad_epoch_rect_dict.pop(epo_num)
def update_events(self):
"""Update representation of events."""
if getattr(self.mne, 'event_nums', None) is not None \
and self.mne.events_visible:
for ev_t, ev_id in zip(self.mne.event_times, self.mne.event_nums):
color_name = self.mne.event_color_dict[ev_id]
color = _get_color(color_name)
color.setAlpha(100)
pen = mkPen(color)
top_left = self._mapFromData(ev_t, 0)
bottom_right = self._mapFromData(ev_t, len(self.mne.ch_order))
line = self.scene().addLine(QLineF(top_left, bottom_right),
pen)
line.setZValue(1)
self.event_line_dict[ev_t] = line
else:
for event_line in self.event_line_dict.values():
self.scene().removeItem(event_line)
self.event_line_dict.clear()
def update_annotations(self):
"""Update representation of annotations."""
annotations = self.mne.inst.annotations
# Exclude non-visible annotations
annot_set = set([annot['onset'] for annot in annotations if
self.mne.visible_annotations[annot['description']]])
rect_set = set(self.annotations_rect_dict.keys())
add_onsets = annot_set.difference(rect_set)
rm_onsets = rect_set.difference(annot_set)
# Add missing onsets
for add_onset in add_onsets:
plot_onset = _sync_onset(self.mne.inst, add_onset)
annot_idx = np.argwhere(self.mne.inst.annotations.onset
== add_onset)[0][0]
duration = annotations.duration[annot_idx]
description = annotations.description[annot_idx]
color_name = self.mne.annotation_segment_colors[description]
color = _get_color(color_name)
color.setAlpha(150)
pen = mkPen(color)
brush = mkBrush(color)
top_left = self._mapFromData(plot_onset, 0)
bottom_right = self._mapFromData(plot_onset + duration,
len(self.mne.ch_order))
rect = self.scene().addRect(QRectF(top_left, bottom_right),
pen, brush)
rect.setZValue(3)
self.annotations_rect_dict[add_onset] = {'rect': rect,
'plot_onset': plot_onset,
'duration': duration,
'color': color_name}
# Remove onsets
for rm_onset in rm_onsets:
self.scene().removeItem(self.annotations_rect_dict[rm_onset]
['rect'])
self.annotations_rect_dict.pop(rm_onset)
# Changes
for edit_onset in self.annotations_rect_dict:
plot_onset = _sync_onset(self.mne.inst, edit_onset)
annot_idx = np.where(annotations.onset == edit_onset)[0][0]
duration = annotations.duration[annot_idx]
rect_duration = self.annotations_rect_dict[edit_onset]['duration']
rect = self.annotations_rect_dict[edit_onset]['rect']
# Update changed duration
if duration != rect_duration:
self.annotations_rect_dict[edit_onset]['duration'] = duration
top_left = self._mapFromData(plot_onset, 0)
bottom_right = self._mapFromData(plot_onset + duration,
len(self.mne.ch_order))
rect.setRect(QRectF(top_left, bottom_right))
# Update changed color
description = annotations.description[annot_idx]
color_name = self.mne.annotation_segment_colors[description]
rect_color = self.annotations_rect_dict[edit_onset]['color']
if color_name != rect_color:
color = _get_color(color_name)
color.setAlpha(150)
pen = mkPen(color)
brush = mkBrush(color)
rect.setPen(pen)
rect.setBrush(brush)
def update_vline(self):
"""Update representation of vline."""
if self.mne.is_epochs:
# VLine representation not useful in epochs-mode
pass
# Add VLine-Representation
elif self.mne.vline is not None:
value = self.mne.vline.value()
top_left = self._mapFromData(value, 0)
bottom_right = self._mapFromData(value, len(self.mne.ch_order))
line = QLineF(top_left, bottom_right)
if self.v_line is None:
pen = mkPen('g')
self.v_line = self.scene().addLine(line, pen)
self.v_line.setZValue(1)
else:
self.v_line.setLine(line)
# Remove VLine-Representation
elif self.v_line is not None:
self.scene().removeItem(self.v_line)
self.v_line = None
def update_viewrange(self):
"""Update representation of viewrange."""
if self.mne.butterfly:
top_left = self._mapFromData(self.mne.t_start, 0)
bottom_right = self._mapFromData(self.mne.t_start +
self.mne.duration, self.mne.ymax)
else:
top_left = self._mapFromData(self.mne.t_start, self.mne.ch_start)
bottom_right = self._mapFromData(self.mne.t_start
+ self.mne.duration,
self.mne.ch_start
+ self.mne.n_channels)
rect = QRectF(top_left, bottom_right)
if self.viewrange_rect is None:
pen = mkPen(color='g')
brush = mkBrush(color=(0, 0, 0, 100))
self.viewrange_rect = self.scene().addRect(rect, pen, brush)
self.viewrange_rect.setZValue(4)
else:
self.viewrange_rect.setRect(rect)
    def _set_range_from_pos(self, pos):
        """Center the viewrange of the plot on the clicked overview-position.

        ``pos`` is a widget-position; it is converted into data-coordinates
        (or the sentinels ``'-offbounds'``/``'+offbounds'`` when outside the
        widget), then clipped to valid ranges and applied to the plot.
        """
        x, y = self._mapToData(pos)

        # Set X
        # Check boundaries
        if self.mne.is_epochs:
            if x == '-offbounds':
                epo_idx = 0
            elif x == '+offbounds':
                epo_idx = len(self.mne.inst) - self.mne.n_epochs
            else:
                # Center the clicked epoch inside the visible epoch-window.
                epo_idx = max(x - self.mne.n_epochs // 2, 0)
            x = self.mne.boundary_times[epo_idx]
        elif x == '-offbounds':
            x = 0
        elif x == '+offbounds':
            x = self.mne.xmax - self.mne.duration
        else:
            # Move click position to middle of view range
            x -= self.mne.duration / 2
        xmin = np.clip(x, 0, self.mne.xmax - self.mne.duration)
        xmax = np.clip(xmin + self.mne.duration,
                       self.mne.duration, self.mne.xmax)

        self.mne.plt.setXRange(xmin, xmax, padding=0)

        # Set Y
        if y == '-offbounds':
            y = 0
        elif y == '+offbounds':
            y = self.mne.ymax - (self.mne.n_channels + 1)
        else:
            # Move click position to middle of view range
            y -= self.mne.n_channels / 2
        ymin = np.clip(y, 0, self.mne.ymax - (self.mne.n_channels + 1))
        ymax = np.clip(ymin + self.mne.n_channels + 1,
                       self.mne.n_channels, self.mne.ymax)
        # Check boundaries
        if self.mne.fig_selection:
            # With an open selection-dialog scroll by selection-groups.
            self.mne.fig_selection._scroll_to_idx(int(ymin))
        else:
            self.mne.plt.setYRange(ymin, ymax, padding=0)
def mousePressEvent(self, event):
"""Customize mouse press events."""
self._set_range_from_pos(event.pos())
def mouseMoveEvent(self, event):
"""Customize mouse move events."""
self._set_range_from_pos(event.pos())
    def _fit_bg_img(self):
        """Scale the background-pixmap to fill the current widget-size."""
        # Remove previous item from scene
        if (self.bg_pxmp_item is not None and
                self.bg_pxmp_item in self.scene().items()):
            self.scene().removeItem(self.bg_pxmp_item)
        # Resize Pixmap
        if self.bg_pxmp is not None:
            cnt_rect = self.contentsRect()
            # Stretch to the full content-rect (aspect-ratio intentionally
            # ignored, the image is a per-channel/row color-map).
            self.bg_pxmp = self.bg_pxmp.scaled(cnt_rect.width(),
                                               cnt_rect.height(),
                                               Qt.IgnoreAspectRatio)
            self.bg_pxmp_item = self.scene().addPixmap(self.bg_pxmp)
    def resizeEvent(self, event):
        """Customize resize event.

        Rescales the scene-rect, the background-image and every graphics-item
        (bad-channel lines, event-/epoch-lines, annotation-rects, vline and
        viewrange-rect) to the new widget-size.
        """
        super().resizeEvent(event)
        cnt_rect = self.contentsRect()
        self.setSceneRect(QRectF(QPoint(0, 0),
                                 QPoint(cnt_rect.width(),
                                        cnt_rect.height())))
        # Resize background
        self._fit_bg_img()

        # Resize Graphics Items (assuming height never changes)
        # Resize bad_channels
        for bad_ch_line in self.bad_line_dict.values():
            current_line = bad_ch_line.line()
            bad_ch_line.setLine(QLineF(current_line.p1(),
                                       Point(cnt_rect.width(),
                                             current_line.y2())))

        # Resize event-lines
        for ev_t, event_line in self.event_line_dict.items():
            top_left = self._mapFromData(ev_t, 0)
            bottom_right = self._mapFromData(ev_t, len(self.mne.ch_order))
            event_line.setLine(QLineF(top_left, bottom_right))

        if self.mne.is_epochs:
            # Resize epoch lines
            for epo_t, epoch_line in self.epoch_line_dict.items():
                top_left = self._mapFromData(epo_t, 0)
                bottom_right = self._mapFromData(epo_t,
                                                 len(self.mne.ch_order))
                epoch_line.setLine(QLineF(top_left, bottom_right))
            # Resize bad rects
            for epo_idx, epoch_rect in self.bad_epoch_rect_dict.items():
                start, stop = self.mne.boundary_times[epo_idx:epo_idx + 2]
                top_left = self._mapFromData(start, 0)
                bottom_right = self._mapFromData(stop, len(self.mne.ch_order))
                epoch_rect.setRect(QRectF(top_left, bottom_right))
        else:
            # Resize annotation-rects
            for annot_dict in self.annotations_rect_dict.values():
                annot_rect = annot_dict['rect']
                plot_onset = annot_dict['plot_onset']
                duration = annot_dict['duration']

                top_left = self._mapFromData(plot_onset, 0)
                bottom_right = self._mapFromData(plot_onset + duration,
                                                 len(self.mne.ch_order))
                annot_rect.setRect(QRectF(top_left, bottom_right))

        # Update vline
        if all([i is not None for i in [self.v_line, self.mne.vline]]):
            value = self.mne.vline.value()
            top_left = self._mapFromData(value, 0)
            bottom_right = self._mapFromData(value, len(self.mne.ch_order))
            self.v_line.setLine(QLineF(top_left, bottom_right))

        # Update viewrange-rect
        top_left = self._mapFromData(self.mne.t_start, self.mne.ch_start)
        bottom_right = self._mapFromData(self.mne.t_start
                                         + self.mne.duration,
                                         self.mne.ch_start
                                         + self.mne.n_channels)
        self.viewrange_rect.setRect(QRectF(top_left, bottom_right))
    def set_background(self):
        """Set the background-image for the selected overview-mode.

        Modes: ``'empty'`` (no background), ``'channels'`` (one colored row
        per channel according to its channel-type) or ``'zscore'`` (uses the
        precomputed RGBA z-score image from ``self.mne.zscore_rgba``).
        """
        # Add Overview-Pixmap
        if self.mne.overview_mode == 'empty':
            self.bg_pxmp = None
        elif self.mne.overview_mode == 'channels':
            channel_rgba = np.empty((len(self.mne.ch_order),
                                     2, 4))
            for line_idx, ch_idx in enumerate(self.mne.ch_order):
                ch_type = self.mne.ch_types[ch_idx]
                color = _get_color(self.mne.ch_color_dict[ch_type])
                channel_rgba[line_idx, :] = color.getRgb()

            # QImage requires contiguous uint8 data.
            channel_rgba = np.require(channel_rgba, np.uint8, 'C')
            self.bg_img = QImage(channel_rgba,
                                 channel_rgba.shape[1],
                                 channel_rgba.shape[0],
                                 QImage.Format_RGBA8888)
            self.bg_pxmp = QPixmap.fromImage(self.bg_img)
        elif self.mne.overview_mode == 'zscore':
            self.bg_img = QImage(self.mne.zscore_rgba,
                                 self.mne.zscore_rgba.shape[1],
                                 self.mne.zscore_rgba.shape[0],
                                 QImage.Format_RGBA8888)
            self.bg_pxmp = QPixmap.fromImage(self.bg_img)

        self._fit_bg_img()
def _mapFromData(self, x, y):
# Include padding from black frame
point_x = self.width() * x / self.mne.xmax
point_y = self.height() * y / len(self.mne.ch_order)
return Point(point_x, point_y)
def _mapToData(self, point):
# Include padding from black frame
xnorm = point.x() / self.width()
if xnorm < 0:
x = '-offbounds'
elif xnorm > 1:
x = '+offbounds'
else:
if self.mne.is_epochs:
# Return epoch index for epochs
x = int(len(self.mne.inst) * xnorm)
else:
time_idx = int((len(self.mne.inst.times) - 1) * xnorm)
x = self.mne.inst.times[time_idx]
ynorm = point.y() / self.height()
if ynorm < 0:
y = '-offbounds'
elif ynorm > 1:
y = '+offbounds'
else:
y = len(self.mne.ch_order) * ynorm
return x, y
    def keyPressEvent(self, event):
        """Forward key-presses to the main window."""
        self.main.keyPressEvent(event)
class RawViewBox(ViewBox):
    """PyQtGraph-Wrapper for interaction with the View.

    Handles annotation-drags, vline placement/removal and scroll-wheel
    navigation inside the trace-plot.
    """

    def __init__(self, main):
        super().__init__(invertY=True)
        # Ranges are controlled manually by the browser, not by pyqtgraph.
        self.enableAutoRange(enable=False, x=False, y=False)
        self.main = main
        self.mne = main.mne
        # State of an ongoing annotation-drag (None when no drag is active).
        self._drag_start = None
        self._drag_region = None

    def mouseDragEvent(self, event, axis=None):
        """Customize mouse drag events.

        In annotation-mode a left-drag creates a new AnnotRegion; on finish
        the new region is merged with overlapping regions of the same
        description and written back into the instance's annotations.
        """
        event.accept()

        if event.button() == Qt.LeftButton \
                and self.mne.annotation_mode:
            if self.mne.current_description:
                description = self.mne.current_description
                if event.isStart():
                    # Begin a new region between drag-start and current pos.
                    self._drag_start = self.mapSceneToView(
                        event.lastScenePos()).x()
                    drag_stop = self.mapSceneToView(event.scenePos()).x()
                    self._drag_region = AnnotRegion(self.mne,
                                                    description=description,
                                                    values=(self._drag_start,
                                                            drag_stop))
                    self.mne.plt.addItem(self._drag_region)
                    self.mne.plt.addItem(self._drag_region.label_item)
                elif event.isFinish():
                    drag_stop = self.mapSceneToView(event.scenePos()).x()
                    self._drag_region.setRegion((self._drag_start, drag_stop))
                    plot_onset = min(self._drag_start, drag_stop)
                    plot_offset = max(self._drag_start, drag_stop)
                    duration = abs(self._drag_start - drag_stop)

                    # Add to annotations
                    onset = _sync_onset(self.mne.inst, plot_onset,
                                        inverse=True)
                    _merge_annotations(onset, onset + duration,
                                       self.mne.current_description,
                                       self.mne.inst.annotations)

                    # Add to regions/merge regions
                    merge_values = [plot_onset, plot_offset]
                    rm_regions = list()
                    for region in [r for r in self.mne.regions
                                   if r.description ==
                                   self.mne.current_description]:
                        values = region.getRegion()
                        if any([plot_onset < val < plot_offset for val in
                                values]):
                            merge_values += values
                            rm_regions.append(region)
                    if len(merge_values) > 2:
                        # Extend the new region over all merged regions.
                        self._drag_region.setRegion((min(merge_values),
                                                     max(merge_values)))
                    for rm_region in rm_regions:
                        self.main._remove_region(rm_region, from_annot=False)
                    self.main._add_region(plot_onset, duration,
                                          self.mne.current_description,
                                          self._drag_region)
                    self._drag_region.select(True)

                    # Update Overview-Bar
                    self.mne.overview_bar.update_annotations()
                else:
                    # Drag in progress: only update the region-boundaries.
                    x_to = self.mapSceneToView(event.scenePos()).x()
                    self._drag_region.setRegion((self._drag_start, x_to))
            elif event.isFinish():
                # No description selected — tell the user instead of adding.
                self.main.message_box(text='No description!',
                                      info_text='No description is given, '
                                                'add one!',
                                      icon=QMessageBox.Warning)

    def mouseClickEvent(self, event):
        """Customize mouse click events.

        Outside annotation-mode: left-click places the vertical guide
        (vline), right-click removes it.
        """
        # If we want the context-menu back, uncomment following line
        # super().mouseClickEvent(event)
        if not self.mne.annotation_mode:
            if event.button() == Qt.LeftButton:
                self.main._add_vline(self.mapSceneToView(
                    event.scenePos()).x())
            elif event.button() == Qt.RightButton:
                self.main._remove_vline()

    def wheelEvent(self, ev, axis=None):
        """Customize mouse wheel/trackpad-scroll events."""
        ev.accept()
        # One wheel-notch (delta 120) corresponds to one scroll-step.
        scroll = -1 * ev.delta() / 120
        if ev.orientation() == Qt.Horizontal:
            self.main.hscroll(scroll * 10)
        elif ev.orientation() == Qt.Vertical:
            self.main.vscroll(scroll)

    def keyPressEvent(self, event):
        """Forward key-presses to the main window."""
        self.main.keyPressEvent(event)
class VLineLabel(InfLineLabel):
    """Label of the vline displaying the time."""

    def __init__(self, vline):
        super().__init__(vline, text='{value:.3f} s', position=0.98,
                         fill='g', color='b', movable=True)
        # Offset between line-position and grab-point during a drag.
        self.cursorOffset = None

    def mouseDragEvent(self, ev):
        """Customize mouse drag events.

        Dragging the label moves the whole vline (keeping the offset at
        which the label was grabbed).
        """
        if self.movable and ev.button() == Qt.LeftButton:
            if ev.isStart():
                self.line.moving = True
                self.cursorOffset = (self.line.pos() -
                                     self.mapToView(ev.buttonDownPos()))
            ev.accept()

            if not self.line.moving:
                return

            self.line.setPos(self.cursorOffset + self.mapToView(ev.pos()))
            self.line.sigDragged.emit(self)
            if ev.isFinish():
                self.line.moving = False
                self.line.sigPositionChangeFinished.emit(self.line)

    def valueChanged(self):
        """Customize what happens on value change."""
        if not self.isVisible():
            return
        value = self.line.value()
        if self.line.mne.is_epochs:
            # Show epoch-time
            t_vals_abs = np.linspace(0, self.line.mne.epoch_dur,
                                     len(self.line.mne.inst.times))
            search_val = value % self.line.mne.epoch_dur
            t_idx = np.searchsorted(t_vals_abs, search_val)
            value = self.line.mne.inst.times[t_idx]
        self.setText(self.format.format(value=value))
        self.updatePosition()
class VLine(InfiniteLine):
    """Marker to be placed inside the Trace-Plot."""

    def __init__(self, mne, pos, bounds):
        # Green, draggable vertical line, restricted to the data-range.
        super().__init__(pos, pen='g', hoverPen='y',
                         movable=True, bounds=bounds)
        self.mne = mne
        # Time-label attached to the line.
        self.label = VLineLabel(self)
class EventLine(InfiniteLine):
    """Displays Events inside Trace-Plot"""

    def __init__(self, pos, id, color):
        # NOTE: `id` shadows the builtin but is part of the existing
        # call-signature and therefore kept.
        super().__init__(pos, pen=color, movable=False,
                         label=str(id), labelOpts={'position': 0.98,
                                                   'color': color,
                                                   'anchors': [(0, 0.5),
                                                               (0, 0.5)]})
        self.label.setFont(QFont('AnyStyle', 10, QFont.Bold))
        # Draw behind traces and markers.
        self.setZValue(0)
class Crosshair(InfiniteLine):
    """Continuously updating marker inside the Trace-Plot."""

    def __init__(self):
        super().__init__(angle=90, movable=False, pen='g')
        # y-coordinate of the highlighted point (x is the line-position).
        self.y = 1

    def set_data(self, x, y):
        """Set x and y data for crosshair point."""
        self.setPos(x)
        self.y = y

    def paint(self, p, *args):
        """Draw the vertical line, then a red dot at the stored y-value."""
        super().paint(p, *args)
        red_pen = mkPen('r', width=4)
        p.setPen(red_pen)
        p.drawPoint(Point(self.y, 0))
class BaseScaleBar:
    """Shared base for the per-channel-type scalebar items."""

    def __init__(self, mne, ch_type):
        self.mne = mne
        self.ch_type = ch_type
        # Channel-row at which the scalebar is drawn (computed lazily).
        self.ypos = None

    def _set_position(self, x, y):
        # Overridden by subclasses (text vs. line placement).
        pass

    def _is_visible(self):
        # Visible only while a channel of this type is picked.
        return self.ch_type in self.mne.ch_types[self.mne.picks]

    def _get_ypos(self):
        """Determine the y-position (first good channel of this type)."""
        if self.mne.butterfly:
            self.ypos = self.mne.butterfly_type_order.index(self.ch_type) + 1
        else:
            ch_type_idxs = np.where(self.mne.ch_types[self.mne.picks]
                                    == self.ch_type)[0]

            for idx in ch_type_idxs:
                ch_name = self.mne.ch_names[self.mne.picks[idx]]
                # Skip bad and whitened channels when placing the bar.
                if ch_name not in self.mne.info['bads'] and \
                        ch_name not in self.mne.whitened_ch_names:
                    self.ypos = self.mne.ch_start + idx + 1
                    break
            # Consider all indices bad
            if self.ypos is None:
                self.ypos = self.mne.ch_start + ch_type_idxs[0] + 1

    def update_x_position(self):
        """Update x-position of Scalebar."""
        if self._is_visible():
            if self.ypos is None:
                self._get_ypos()
            self._set_position(self.mne.t_start, self.ypos)

    def update_y_position(self):
        """Update y-position of Scalebar."""
        if self._is_visible():
            self.setVisible(True)
            self._get_ypos()
            self._set_position(self.mne.t_start, self.ypos)
        else:
            self.setVisible(False)
class ScaleBarText(BaseScaleBar, TextItem):
    """Text-label showing the scalebar-value for one channel-type."""

    def __init__(self, mne, ch_type):
        BaseScaleBar.__init__(self, mne, ch_type)
        TextItem.__init__(self, color='#AA3377')

        self.setFont(QFont('AnyStyle', 10))
        self.setZValue(2)  # To draw over RawTraceItems

        self.update_value()
        self.update_y_position()

    def update_value(self):
        """Update value of ScaleBarText."""
        # Invert the display-normalization back to physical units.
        scaler = 1 if self.mne.butterfly else 2
        inv_norm = (scaler *
                    self.mne.scalings[self.ch_type] *
                    self.mne.unit_scalings[self.ch_type] /
                    self.mne.scale_factor)
        self.setText(f'{_simplify_float(inv_norm)} '
                     f'{self.mne.units[self.ch_type]}')

    def _set_position(self, x, y):
        self.setPos(x, y)
class ScaleBar(BaseScaleBar, QGraphicsLineItem):
    """Vertical line marking one scale-unit for a channel-type."""

    def __init__(self, mne, ch_type):
        BaseScaleBar.__init__(self, mne, ch_type)
        QGraphicsLineItem.__init__(self)

        self.setZValue(1)
        self.setPen(mkPen(color='#AA3377', width=5))
        self.update_y_position()

    def _set_position(self, x, y):
        # Bar spans one channel-row, centered on y.
        self.setLine(QLineF(x, y - 0.5, x, y + 0.5))

    def get_ydata(self):
        """Get y-data for tests."""
        line = self.line()
        return line.y1(), line.y2()
class _BaseDialog(QDialog):
    """Base-class for the browser's dialogs.

    Registers itself in ``mne.child_figs`` (and optionally as attribute
    ``name`` on ``mne``), handles centering, Escape-to-close and cleanup
    on close.
    """

    def __init__(self, main, widget=None,
                 modal=False, name=None, title=None):
        super().__init__(main)
        self.main = main
        self.widget = widget
        self.mne = main.mne
        self.name = name
        self.modal = modal

        # Delete the Qt-object when closed (works with WA_DeleteOnClose).
        self.setAttribute(Qt.WA_DeleteOnClose, True)

        self.mne.child_figs.append(self)
        if self.name is not None:
            setattr(self.mne, self.name, self)
        if title is not None:
            self.setWindowTitle(title)
        if self.widget is not None:
            layout = QVBoxLayout()
            layout.addWidget(self.widget)
            self.setLayout(layout)

    def show(self, center=True):
        """Show the dialog (modal or modeless), optionally centered."""
        if self.modal:
            self.open()
        else:
            super().show()

        if center:
            # center dialog
            qr = self.frameGeometry()
            cp = QDesktopWidget().availableGeometry().center()
            qr.moveCenter(cp)
            self.move(qr.topLeft())

    def keyPressEvent(self, event):
        """Close on Escape, forward everything else to the parent."""
        if event.key() == Qt.Key_Escape:
            self.close()
        else:
            self.parent().keyPressEvent(event)

    def closeEvent(self, event):
        """Deregister the dialog from ``mne`` before closing."""
        if hasattr(self, 'name') and hasattr(self, 'mne'):
            if self.name is not None and hasattr(self.mne, self.name):
                setattr(self.mne, self.name, None)
            if self in self.mne.child_figs:
                self.mne.child_figs.remove(self)
        event.accept()
class SettingsDialog(_BaseDialog):
    """Shows additional settings.

    Lets the user change downsampling-factor, downsampling-method and the
    horizontal scroll-sensitivity; changes are written directly back to
    ``self.mne`` via ``_value_changed``.
    """

    def __init__(self, main, **kwargs):
        super().__init__(main, **kwargs)

        layout = QFormLayout()

        self.downsampling_box = QSpinBox()
        self.downsampling_box.setToolTip('Set an integer as the downsampling'
                                         ' factor or "Auto" to get the factor'
                                         ' from the visible range.\n'
                                         ' Setting the factor 1 means no '
                                         'downsampling.\n'
                                         ' Default is 1.')
        # SpinBox-value 0 is displayed as "Auto" (special value) and mapped
        # to the string 'auto' in _value_changed.
        self.downsampling_box.setMinimum(0)
        self.downsampling_box.setSpecialValueText('Auto')
        self.downsampling_box.valueChanged.connect(partial(
            self._value_changed, value_name='downsampling'))
        self.downsampling_box.setValue(0 if self.mne.downsampling == 'auto'
                                       else self.mne.downsampling)
        layout.addRow('downsampling', self.downsampling_box)

        self.ds_method_cmbx = QComboBox()
        self.ds_method_cmbx.setToolTip(
            '<h2>Downsampling Method</h2>'
            '<ul>'
            '<li>subsample:<br>'
            'Only take every n-th sample.</li>'
            '<li>mean:<br>'
            'Take the mean of n samples.</li>'
            '<li>peak:<br>'
            'Draws a saw wave from the minimum to the maximum from a '
            'collection of n samples.</li>'
            '</ul>'
            '<i>(Those methods are adapted from '
            'pyqtgraph)</i><br>'
            'Default is "peak".')
        self.ds_method_cmbx.addItems(['subsample', 'mean', 'peak'])
        self.ds_method_cmbx.currentTextChanged.connect(partial(
            self._value_changed, value_name='ds_method'))
        self.ds_method_cmbx.setCurrentText(
            self.mne.ds_method)
        layout.addRow('ds_method', self.ds_method_cmbx)

        self.scroll_sensitivity_slider = QSlider(Qt.Horizontal)
        self.scroll_sensitivity_slider.setMinimum(10)
        self.scroll_sensitivity_slider.setMaximum(1000)
        self.scroll_sensitivity_slider.setToolTip('Set the sensitivity of '
                                                  'the scrolling in '
                                                  'horizontal direction.')
        self.scroll_sensitivity_slider.valueChanged.connect(partial(
            self._value_changed, value_name='scroll_sensitivity'))
        # Set default
        self.scroll_sensitivity_slider.setValue(self.mne.scroll_sensitivity)
        layout.addRow('horizontal scroll sensitivity',
                      self.scroll_sensitivity_slider)
        self.setLayout(layout)
        self.show()

    def closeEvent(self, event):
        """Disconnect all value-changed signals, then run base cleanup.

        Fixes two bugs in the previous implementation: the override took no
        ``event`` argument although Qt passes one (raising ``TypeError`` on
        close), and it called ``super.closeEvent()`` on the ``super``
        builtin itself instead of ``super().closeEvent(event)``. Also
        disconnects ``downsampling_box.valueChanged``, which was connected
        in ``__init__`` but never disconnected.
        """
        _disconnect(self.downsampling_box.valueChanged)
        _disconnect(self.ds_method_cmbx.currentTextChanged)
        _disconnect(self.scroll_sensitivity_slider.valueChanged)
        super().closeEvent(event)

    def _value_changed(self, new_value, value_name):
        """Write a changed widget-value back to ``self.mne`` and refresh."""
        # SpinBox-value 0 is the sentinel for automatic downsampling.
        if value_name == 'downsampling' and new_value == 0:
            new_value = 'auto'

        setattr(self.mne, value_name, new_value)

        if value_name == 'scroll_sensitivity':
            self.mne.ax_hscroll._update_scroll_sensitivity()
        else:
            self.main._redraw()
class HelpDialog(_BaseDialog):
    """Shows all keyboard-shortcuts."""

    def __init__(self, main, **kwargs):
        super().__init__(main, **kwargs)

        # Show all keyboard-shortcuts in a Scroll-Area
        layout = QVBoxLayout()
        keyboard_label = QLabel('Keyboard Shortcuts')
        keyboard_label.setFont(QFont('AnyStyle', 16, QFont.Bold))
        layout.addWidget(keyboard_label)

        scroll_area = QScrollArea()
        scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll_area.setSizePolicy(QSizePolicy.MinimumExpanding,
                                  QSizePolicy.MinimumExpanding)
        scroll_widget = QWidget()
        form_layout = QFormLayout()
        # One row per shortcut-description; a shortcut may carry an alias
        # and per-description modifiers (e.g. "Ctrl + x").
        for key in main.mne.keyboard_shortcuts:
            key_dict = main.mne.keyboard_shortcuts[key]
            if 'description' in key_dict:
                if 'alias' in key_dict:
                    key = key_dict['alias']
                for idx, key_des in enumerate(key_dict['description']):
                    key_name = key
                    if 'modifier' in key_dict:
                        mod = key_dict['modifier'][idx]
                        if mod is not None:
                            key_name = mod + ' + ' + key_name
                    form_layout.addRow(key_name, QLabel(key_des))
        scroll_widget.setLayout(form_layout)
        scroll_area.setWidget(scroll_widget)
        layout.addWidget(scroll_area)

        # Additional help for mouse interaction
        inst = self.main.mne.instance_type
        is_raw = inst == 'raw'
        is_epo = inst == 'epochs'
        is_ica = inst == 'ica'
        ch_cmp = 'component' if is_ica else 'channel'
        ch_epo = 'epoch' if is_epo else 'channel'
        ica_bad = 'Mark/unmark component for exclusion'
        lclick_data = ica_bad if is_ica else f'Mark/unmark bad {ch_epo}'
        lclick_name = (ica_bad if is_ica else 'Mark/unmark bad channel')
        ldrag = 'add annotation (in annotation mode)' if is_raw else None
        rclick_name = dict(ica='Show diagnostics for component',
                           epochs='Show imageplot for channel',
                           raw='Show channel location')[inst]
        mouse_help = [(f'Left-click {ch_cmp} name', lclick_name),
                      (f'Left-click {ch_cmp} data', lclick_data),
                      ('Left-click-and-drag on plot', ldrag),
                      ('Left-click on plot background',
                       'Place vertical guide'),
                      ('Right-click on plot background',
                       'Clear vertical guide'),
                      ('Right-click on channel name', rclick_name)]
        mouse_label = QLabel('Mouse Interaction')
        mouse_label.setFont(QFont('AnyStyle', 16, QFont.Bold))
        layout.addWidget(mouse_label)
        mouse_widget = QWidget()
        mouse_layout = QFormLayout()
        for interaction, description in mouse_help:
            # Entries with description None don't apply to this inst-type.
            if description is not None:
                mouse_layout.addRow(f'{interaction}:', QLabel(description))
        mouse_widget.setLayout(mouse_layout)
        layout.addWidget(mouse_widget)

        self.setLayout(layout)
        self.show()

        # Set minimum width to avoid horizontal scrolling
        scroll_area.setMinimumWidth(scroll_widget.minimumSizeHint().width() +
                                    scroll_area.verticalScrollBar().width())
        self.update()
class ProjDialog(_BaseDialog):
    """A dialog to toggle projections."""

    def __init__(self, main, **kwargs):
        self.external_change = True
        # Create projection-layout
        super().__init__(main, **kwargs)

        layout = QVBoxLayout()
        labels = [p['desc'] for p in self.mne.projs]
        for ix, active in enumerate(self.mne.projs_active):
            if active:
                labels[ix] += ' (already applied)'

        # make title
        layout.addWidget(QLabel('Mark projectors applied on the plot.\n'
                                '(Applied projectors are dimmed).'))

        # Add checkboxes
        self.checkboxes = list()
        for idx, label in enumerate(labels):
            chkbx = QCheckBox(label)
            chkbx.setChecked(bool(self.mne.projs_on[idx]))
            chkbx.clicked.connect(partial(self._proj_changed, idx=idx))
            if self.mne.projs_active[idx]:
                # Already-applied projectors can't be toggled anymore.
                chkbx.setEnabled(False)
            self.checkboxes.append(chkbx)
            layout.addWidget(chkbx)

        self.toggle_all_bt = QPushButton('Toggle All')
        self.toggle_all_bt.clicked.connect(self.toggle_all)
        layout.addWidget(self.toggle_all_bt)
        self.setLayout(layout)
        self.show()

    def _proj_changed(self, state, idx):
        """Apply a single checkbox-change to the projectors."""
        # Only change if proj wasn't already applied.
        if not self.mne.projs_active[idx]:
            self.mne.projs_on[idx] = state
            self.main._apply_update_projectors()

    def toggle_all(self):
        """Toggle all projectors."""
        self.main._apply_update_projectors(toggle_all=True)

        # Update all checkboxes
        for idx, chkbx in enumerate(self.checkboxes):
            chkbx.setChecked(bool(self.mne.projs_on[idx]))
class _ChannelFig(FigureCanvasQTAgg):
    """Qt-canvas hosting the matplotlib sensor-selection figure."""

    def __init__(self, figure):
        self.figure = figure
        super().__init__(figure)
        self.setFocusPolicy(Qt.StrongFocus | Qt.WheelFocus)
        self.setFocus()
        # Current lasso-path drawn by the user (None when not drawing).
        self._lasso_path = None
        # Only update when mouse is pressed
        self.setMouseTracking(False)

    def paintEvent(self, event):
        """Paint the figure, then overlay the lasso-path in Qt."""
        super().paintEvent(event)
        # Lasso-Drawing doesn't seem to work with mpl, thus it is replicated
        # in Qt.
        if self._lasso_path is not None:
            painter = QPainter(self)
            painter.setPen(mkPen('red', width=2))
            painter.drawPath(self._lasso_path)
            painter.end()

    def mouseMoveEvent(self, event):
        """Extend the lasso-path while the mouse is dragged."""
        super().mouseMoveEvent(event)
        # Start a new path on the first move-event, extend it afterwards.
        if self._lasso_path is None:
            self._lasso_path = QPainterPath()
            self._lasso_path.moveTo(event.pos())
        else:
            self._lasso_path.lineTo(event.pos())

        self.update()

    def mouseReleaseEvent(self, event):
        """Discard the lasso-path when the mouse is released."""
        super().mouseReleaseEvent(event)
        self._lasso_path = None
        self.update()

    def keyPressEvent(self, event):
        # Let key-presses bubble up to the parent dialog.
        event.ignore()
class SelectionDialog(_BaseDialog):
    """Dialog to switch between predefined channel-selections.

    Shows a sensor-plot with lasso-selection and one checkbox per
    selection-group (plus a "Custom" group filled by the lasso).
    """

    def __init__(self, main):
        # Create widget
        super().__init__(main, name='fig_selection',
                         title='Channel selection')
        # Place the dialog at the right edge of the screen.
        xpos = QApplication.desktop().screenGeometry().width() - 400
        self.setGeometry(xpos, 100, 400, 800)

        layout = QVBoxLayout()

        # Add channel plot
        fig = _figure_agg(figsize=(6, 6), dpi=96)
        ax = fig.add_axes([0, 0, 1, 1])
        self.channel_fig = plot_sensors(self.mne.info, kind='select',
                                        ch_type='all', title='',
                                        ch_groups=self.mne.group_by, axes=ax,
                                        show=False)[0]
        if hasattr(self.channel_fig.lasso, 'callbacks'):
            # MNE >= 1.0
            self.channel_fig.lasso.callbacks.append(self._set_custom_selection)
        else:
            # MNE <= 0.24
            self.channel_fig.canvas.mpl_connect(
                'lasso_event', self._set_custom_selection)
        self.channel_widget = _ChannelFig(self.channel_fig)
        layout.addWidget(self.channel_widget)

        selections_dict = self.mne.ch_selections
        selections_dict.update(Custom=np.array([], dtype=int))  # for lasso

        self.chkbxs = OrderedDict()
        for label in selections_dict:
            chkbx = QCheckBox(label)
            chkbx.clicked.connect(partial(self._chkbx_changed, label))
            self.chkbxs[label] = chkbx
            layout.addWidget(chkbx)

        # Start with the first selection-group checked.
        self.mne.old_selection = list(selections_dict.keys())[0]
        self.chkbxs[self.mne.old_selection].setChecked(True)

        self._update_highlighted_sensors()

        # add instructions at bottom
        instructions = (
            'To use a custom selection, first click-drag on the sensor plot '
            'to "lasso" the sensors you want to select, or hold Ctrl while '
            'clicking individual sensors. Holding Ctrl while click-dragging '
            'allows a lasso selection adding to (rather than replacing) the '
            'existing selection.')
        help_widget = QTextEdit(instructions)
        help_widget.setReadOnly(True)
        layout.addWidget(help_widget)

        self.setLayout(layout)
        self.show(center=False)

    def _chkbx_changed(self, label):
        """Activate the selection-group *label* and update the view."""
        # Disable butterfly if checkbox is clicked
        if self.mne.butterfly:
            self.main._set_butterfly(False)
        # Disable other checkboxes
        for chkbx in self.chkbxs.values():
            chkbx.setChecked(False)
        # An empty "Custom" selection falls back to the previous group.
        if (label == 'Custom' and
                not len(self.mne.ch_selections['Custom'])):
            label = self.mne.old_selection
        # Select the checkbox no matter if clicked on when active or not
        self.chkbxs[label].setChecked(True)
        # Update selections
        self.mne.old_selection = label
        self.mne.picks = np.asarray(self.mne.ch_selections[label])
        self.mne.n_channels = len(self.mne.picks)
        # Update highlighted sensors
        self._update_highlighted_sensors()
        # if "Vertex" is defined, some channels appear twice, so if
        # "Vertex" is selected, ch_start should be the *first* match;
        # otherwise it should be the *last* match (since "Vertex" is
        # always the first selection group, if it exists).
        if label == 'Custom':
            self.mne.ch_start = 0
        else:
            all_values = list()
            for key, chs in self.mne.ch_selections.items():
                if np.array_equal(chs, self.mne.picks):
                    self.mne.ch_start = len(all_values)
                    break
                else:
                    all_values = np.concatenate([all_values, chs])

        # Apply changes on view
        self.mne.plt.setYRange(self.mne.ch_start,
                               self.mne.ch_start + self.mne.n_channels + 1,
                               padding=0)

        # Update scrollbar
        label_idx = list(self.mne.ch_selections.keys()).index(label)
        self.mne.ax_vscroll.update_value(label_idx)

        # Update all y-positions, because channels can appear in multiple
        # selections on different y-positions
        for trace in self.mne.traces:
            trace.update_ypos()
            trace.update_data()

    def _set_custom_selection(self):
        """Fill the "Custom" group from the current lasso-selection."""
        chs = self.channel_fig.lasso.selection
        inds = np.in1d(self.mne.ch_names, chs)
        self.mne.ch_selections['Custom'] = inds.nonzero()[0]
        if any(inds):
            self._chkbx_changed('Custom')

    def _update_highlighted_sensors(self):
        """Highlight the sensors of the currently picked channels."""
        inds = np.in1d(self.mne.fig_selection.channel_fig.lasso.ch_names,
                       self.mne.ch_names[self.mne.picks]).nonzero()[0]
        self.channel_fig.lasso.select_many(inds)
        self.channel_widget.draw()

    def _update_bad_sensors(self, pick, mark_bad):
        """Color the sensor of channel *pick* according to bad-state."""
        sensor_picks = list()
        ch_indices = channel_indices_by_type(self.mne.info)
        for this_type in _DATA_CH_TYPES_SPLIT:
            if this_type in self.mne.ch_types:
                sensor_picks.extend(ch_indices[this_type])
        sensor_idx = np.in1d(sensor_picks, pick).nonzero()[0]
        # change the sensor color
        fig = self.channel_fig
        fig.lasso.ec[sensor_idx, 0] = float(mark_bad)  # change R of RGBA array
        fig.lasso.collection.set_edgecolors(fig.lasso.ec)
        fig.canvas.draw_idle()
        self.channel_widget.draw()

    def _style_butterfly(self):
        """Sync checkbox-state with butterfly-mode."""
        for key, chkbx in self.chkbxs.items():
            if self.mne.butterfly:
                chkbx.setChecked(False)
            else:
                if key == self.mne.old_selection:
                    chkbx.setChecked(True)
        self._update_highlighted_sensors()

    def _scroll_selection(self, step):
        """Move *step* selection-groups up/down."""
        name_idx = list(self.mne.ch_selections.keys()).index(
            self.mne.old_selection)
        new_idx = np.clip(name_idx + step,
                          0, len(self.mne.ch_selections) - 1)
        new_label = list(self.mne.ch_selections.keys())[new_idx]
        self._chkbx_changed(new_label)

    def _scroll_to_idx(self, idx):
        """Activate the selection-group containing channel-position *idx*."""
        all_values = list()
        label = list(self.mne.ch_selections.keys())[0]
        for key, values in self.mne.ch_selections.items():
            all_values = np.concatenate([all_values, values])
            if idx < len(all_values):
                label = key
                break
        self._chkbx_changed(label)

    def closeEvent(self, event):
        """Disconnect callbacks and close the main window along."""
        super().closeEvent(event)
        if hasattr(self.channel_fig.lasso, 'callbacks'):
            # MNE >= 1.0
            self.channel_fig.lasso.callbacks.clear()
        for chkbx in self.chkbxs.values():
            _disconnect(chkbx.clicked)
        if hasattr(self, 'main'):
            self.main.close()
class AnnotRegion(LinearRegionItem):
    """Graphics-Object for Annotations."""

    # Emitted after the user finished moving/resizing the region.
    regionChangeFinished = pyqtSignal(object)
    # Emitted when the region got selected by a left-click.
    gotSelected = pyqtSignal(object)
    # Emitted when the region requests its own removal (right-click).
    removeRequested = pyqtSignal(object)

    def __init__(self, mne, description, values):
        super().__init__(values=values, orientation='vertical',
                         movable=True, swapMode='sort',
                         bounds=(0, mne.xmax))
        # Set default z-value to 0 to be behind other items in scene
        self.setZValue(0)
        self.sigRegionChangeFinished.connect(self._region_changed)
        self.mne = mne
        self.description = description
        self.old_onset = values[0]
        self.selected = False

        self.label_item = TextItem(text=description, anchor=(0.5, 0.5))
        self.label_item.setFont(QFont('AnyStyle', 10, QFont.Bold))
        self.sigRegionChanged.connect(self.update_label_pos)

        self.update_color()

    def _region_changed(self):
        # Re-emit with the region itself and remember the new onset.
        self.regionChangeFinished.emit(self)
        self.old_onset = self.getRegion()[0]

    def update_color(self):
        """Update color of annotation-region."""
        color_string = self.mne.annotation_segment_colors[self.description]
        # Same base-color in three alpha-levels: fill, hover-fill, text.
        self.base_color = _get_color(color_string)
        self.hover_color = _get_color(color_string)
        self.text_color = _get_color(color_string)
        self.base_color.setAlpha(75)
        self.hover_color.setAlpha(150)
        self.text_color.setAlpha(255)
        self.line_pen = mkPen(color=self.hover_color, width=2)
        self.hover_pen = mkPen(color=self.text_color, width=2)
        self.setBrush(self.base_color)
        self.setHoverBrush(self.hover_color)
        self.label_item.setColor(self.text_color)
        for line in self.lines:
            line.setPen(self.line_pen)
            line.setHoverPen(self.hover_pen)
        self.update()

    def update_description(self, description):
        """Update description of annotation-region."""
        self.description = description
        self.label_item.setText(description)
        self.label_item.update()

    def update_visible(self, visible):
        """Update if annotation-region is visible."""
        self.setVisible(visible)
        self.label_item.setVisible(visible)

    def remove(self):
        """Remove annotation-region."""
        self.removeRequested.emit(self)
        vb = self.mne.viewbox
        if vb and self.label_item in vb.addedItems:
            vb.removeItem(self.label_item)

    def select(self, selected):
        """Update select-state of annotation-region."""
        self.selected = selected
        if selected:
            # Selected regions get a white label on filled background.
            self.label_item.setColor('w')
            self.label_item.fill = mkBrush(self.hover_color)
            self.gotSelected.emit(self)
        else:
            self.label_item.setColor(self.text_color)
            self.label_item.fill = mkBrush(None)
            self.label_item.update()

    def mouseClickEvent(self, event):
        """Customize mouse click events."""
        if self.mne.annotation_mode:
            if event.button() == Qt.LeftButton and self.movable:
                self.select(True)
                event.accept()
            elif event.button() == Qt.RightButton and self.movable:
                self.remove()
                # Propagate remove request to lower annotations if overlapping
                event.ignore()
        else:
            event.ignore()

    def update_label_pos(self):
        """Update position of description-label from annotation-region."""
        rgn = self.getRegion()
        vb = self.mne.viewbox
        if vb:
            # Keep the label centered near the top of the view.
            ymax = vb.viewRange()[1][1]
            self.label_item.setPos(sum(rgn) / 2, ymax - 0.3)
class _AnnotEditDialog(_BaseDialog):
    """Dialog to edit the description of annotations (all or selected)."""

    def __init__(self, annot_dock):
        super().__init__(annot_dock.main, title='Edit Annotations')
        self.ad = annot_dock

        # 'all' or 'selected'; set by the scope-combobox below.
        self.current_mode = None

        layout = QVBoxLayout()
        self.descr_label = QLabel()
        if self.mne.selected_region:
            # Scope-selection only offered when a region is selected.
            self.mode_cmbx = QComboBox()
            self.mode_cmbx.addItems(['all', 'selected'])
            self.mode_cmbx.currentTextChanged.connect(self._mode_changed)
            layout.addWidget(QLabel('Edit Scope:'))
            layout.addWidget(self.mode_cmbx)
        # Set group as default
        self._mode_changed('all')

        layout.addWidget(self.descr_label)
        self.input_w = QLineEdit()
        layout.addWidget(self.input_w)
        bt_layout = QHBoxLayout()
        ok_bt = QPushButton('Ok')
        ok_bt.clicked.connect(self._edit)
        bt_layout.addWidget(ok_bt)
        cancel_bt = QPushButton('Cancel')
        cancel_bt.clicked.connect(self.close)
        bt_layout.addWidget(cancel_bt)
        layout.addLayout(bt_layout)
        self.setLayout(layout)
        self.show()

    def _mode_changed(self, mode):
        """Switch between editing all regions or only the selected one."""
        self.current_mode = mode
        if mode == 'all':
            curr_des = self.ad.description_cmbx.currentText()
        else:
            curr_des = self.mne.selected_region.description
        self.descr_label.setText(f'Change "{curr_des}" to:')

    def _edit(self):
        """Apply the entered description and close the dialog."""
        new_des = self.input_w.text()
        if new_des:
            if self.current_mode == 'all' or self.mne.selected_region is None:
                self.ad._edit_description_all(new_des)
            else:
                self.ad._edit_description_selected(new_des)
            self.close()
class AnnotationDock(QDockWidget):
"""Dock-Window for Management of annotations."""
    def __init__(self, main):
        super().__init__('Annotations')
        self.main = main
        self.mne = main.mne
        self._init_ui()

        # Dock may be moved and floated, but not closed by the user.
        self.setFeatures(QDockWidget.DockWidgetMovable |
                         QDockWidget.DockWidgetFloatable)
    def _init_ui(self):
        """Build the dock-content: description-combobox, add/remove/edit
        buttons, visibility-selection and start/stop-spinboxes."""
        widget = QWidget()
        layout = QHBoxLayout()
        layout.setAlignment(Qt.AlignLeft)

        self.description_cmbx = QComboBox()
        self.description_cmbx.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.description_cmbx.activated.connect(self._description_changed)
        self._update_description_cmbx()
        layout.addWidget(self.description_cmbx)

        add_bt = QPushButton('Add Description')
        add_bt.clicked.connect(self._add_description_dlg)
        layout.addWidget(add_bt)

        rm_bt = QPushButton('Remove Description')
        rm_bt.clicked.connect(self._remove_description_dlg)
        layout.addWidget(rm_bt)

        edit_bt = QPushButton('Edit Description')
        edit_bt.clicked.connect(self._edit_description_dlg)
        layout.addWidget(edit_bt)

        # Uncomment when custom colors for annotations are implemented in
        # MNE-Python.
        # color_bt = QPushButton('Edit Color')
        # color_bt.clicked.connect(self._set_color)
        # layout.addWidget(color_bt)

        select_bt = QPushButton('Select Visible')
        select_bt.clicked.connect(self._select_annotations)
        layout.addWidget(select_bt)

        # Determine reasonable time decimals from sampling frequency.
        time_decimals = int(np.ceil(np.log10(self.mne.info['sfreq'])))

        layout.addWidget(QLabel('Start:'))
        self.start_bx = QDoubleSpinBox()
        self.start_bx.setDecimals(time_decimals)
        self.start_bx.editingFinished.connect(self._start_changed)
        layout.addWidget(self.start_bx)

        layout.addWidget(QLabel('Stop:'))
        self.stop_bx = QDoubleSpinBox()
        self.stop_bx.setDecimals(time_decimals)
        self.stop_bx.editingFinished.connect(self._stop_changed)
        layout.addWidget(self.stop_bx)

        help_bt = QPushButton(QIcon(":/help.svg"), 'Help')
        help_bt.clicked.connect(self._show_help)
        layout.addWidget(help_bt)

        widget.setLayout(layout)
        self.setWidget(widget)
def _add_description_to_cmbx(self, description):
color_pixmap = QPixmap(25, 25)
color = _get_color(self.mne.annotation_segment_colors[description])
color.setAlpha(75)
color_pixmap.fill(color)
color_icon = QIcon(color_pixmap)
self.description_cmbx.addItem(color_icon, description)
def _add_description(self, new_description):
self.mne.new_annotation_labels.append(new_description)
self.mne.visible_annotations[new_description] = True
self.main._setup_annotation_colors()
self._add_description_to_cmbx(new_description)
self.mne.current_description = new_description
self.description_cmbx.setCurrentText(new_description)
def _add_description_dlg(self):
new_description, ok = QInputDialog.getText(self,
'Set new description!',
'New description: ')
if ok and new_description \
and new_description not in self.mne.new_annotation_labels:
self._add_description(new_description)
def _edit_description_all(self, new_des):
"""Update descriptions of all annotations with the same description."""
old_des = self.description_cmbx.currentText()
edit_regions = [r for r in self.mne.regions
if r.description == old_des]
# Update regions & annotations
for ed_region in edit_regions:
idx = self.main._get_onset_idx(ed_region.getRegion()[0])
self.mne.inst.annotations.description[idx] = new_des
ed_region.update_description(new_des)
# Update containers with annotation-attributes
self.mne.new_annotation_labels.remove(old_des)
self.mne.new_annotation_labels = self.main._get_annotation_labels()
self.mne.visible_annotations[new_des] = \
self.mne.visible_annotations.pop(old_des)
self.mne.annotation_segment_colors[new_des] = \
self.mne.annotation_segment_colors.pop(old_des)
# Update related widgets
self.main._setup_annotation_colors()
self._update_regions_colors()
self._update_description_cmbx()
self.mne.overview_bar.update_annotations()
def _edit_description_selected(self, new_des):
"""Update description only of selected region."""
old_des = self.mne.selected_region.description
idx = self.main._get_onset_idx(self.mne.selected_region.getRegion()[0])
# Update regions & annotations
self.mne.inst.annotations.description[idx] = new_des
self.mne.selected_region.update_description(new_des)
# Update containers with annotation-attributes
if new_des not in self.mne.new_annotation_labels:
self.mne.new_annotation_labels.append(new_des)
self.mne.visible_annotations[new_des] = \
copy(self.mne.visible_annotations[old_des])
if old_des not in self.mne.inst.annotations.description:
self.mne.new_annotation_labels.remove(old_des)
self.mne.visible_annotations.pop(old_des)
self.mne.annotation_segment_colors[new_des] = \
self.mne.annotation_segment_colors.pop(old_des)
# Update related widgets
self.main._setup_annotation_colors()
self._update_regions_colors()
self._update_description_cmbx()
self.mne.overview_bar.update_annotations()
def _edit_description_dlg(self):
if len(self.mne.inst.annotations.description) > 0:
_AnnotEditDialog(self)
else:
self.main.message_box(text='No Annotations!',
info_text='There are no annotations '
'yet to edit!',
icon=QMessageBox.Information)
def _remove_description(self, rm_description):
# Remove regions
for rm_region in [r for r in self.mne.regions
if r.description == rm_description]:
rm_region.remove()
# Remove from descriptions
self.mne.new_annotation_labels.remove(rm_description)
self._update_description_cmbx()
# Remove from visible annotations
self.mne.visible_annotations.pop(rm_description)
# Remove from color-mapping
if rm_description in self.mne.annotation_segment_colors:
self.mne.annotation_segment_colors.pop(rm_description)
# Set first description in Combo-Box to current description
if self.description_cmbx.count() > 0:
self.description_cmbx.setCurrentIndex(0)
self.mne.current_description = \
self.description_cmbx.currentText()
def _remove_description_dlg(self):
rm_description = self.description_cmbx.currentText()
existing_annot = list(self.mne.inst.annotations.description).count(
rm_description)
if existing_annot > 0:
text = f'Remove annotations with {rm_description}?'
info_text = f'There exist {existing_annot} annotations with ' \
f'"{rm_description}".\n' \
f'Do you really want to remove them?'
buttons = QMessageBox.Yes | QMessageBox.No
ans = self.main.message_box(text=text, info_text=info_text,
buttons=buttons,
default_button=QMessageBox.Yes,
icon=QMessageBox.Question)
else:
ans = QMessageBox.Yes
if ans == QMessageBox.Yes:
self._remove_description(rm_description)
def _select_annotations(self):
def _set_visible_region(state, description):
self.mne.visible_annotations[description] = bool(state)
def _select_all():
for chkbx in chkbxs:
chkbx.setChecked(True)
def _clear_all():
for chkbx in chkbxs:
chkbx.setChecked(False)
select_dlg = QDialog(self)
chkbxs = list()
layout = QVBoxLayout()
layout.addWidget(QLabel('Select visible labels:'))
# Add descriptions to scroll-area to be scalable.
scroll_area = QScrollArea()
scroll_widget = QWidget()
scroll_layout = QVBoxLayout()
for des in self.mne.visible_annotations:
chkbx = QCheckBox(des)
chkbx.setChecked(self.mne.visible_annotations[des])
chkbx.stateChanged.connect(partial(_set_visible_region,
description=des))
chkbxs.append(chkbx)
scroll_layout.addWidget(chkbx)
scroll_widget.setLayout(scroll_layout)
scroll_area.setWidget(scroll_widget)
layout.addWidget(scroll_area)
bt_layout = QGridLayout()
all_bt = QPushButton('All')
all_bt.clicked.connect(_select_all)
bt_layout.addWidget(all_bt, 0, 0)
clear_bt = QPushButton('Clear')
clear_bt.clicked.connect(_clear_all)
bt_layout.addWidget(clear_bt, 0, 1)
ok_bt = QPushButton('Ok')
ok_bt.clicked.connect(select_dlg.close)
bt_layout.addWidget(ok_bt, 1, 0, 1, 2)
layout.addLayout(bt_layout)
select_dlg.setLayout(layout)
select_dlg.exec()
self.main._update_regions_visible()
def _description_changed(self, descr_idx):
new_descr = self.description_cmbx.itemText(descr_idx)
self.mne.current_description = new_descr
def _start_changed(self):
start = self.start_bx.value()
sel_region = self.mne.selected_region
if sel_region:
stop = sel_region.getRegion()[1]
if start < stop:
sel_region.setRegion((start, stop))
else:
self.main.message_box(text='Invalid value!',
info_text='Start can\'t be bigger or '
'equal to Stop!',
icon=QMessageBox.Critical,
modal=False)
self.start_bx.setValue(sel_region.getRegion()[0])
def _stop_changed(self):
stop = self.stop_bx.value()
sel_region = self.mne.selected_region
if sel_region:
start = sel_region.getRegion()[0]
if start < stop:
sel_region.setRegion((start, stop))
else:
self.main.message_box(text='Invalid value!',
info_text='Stop can\'t be smaller or '
'equal to Start!',
icon=QMessageBox.Critical)
self.stop_bx.setValue(sel_region.getRegion()[1])
def _set_color(self):
curr_descr = self.description_cmbx.currentText()
if curr_descr in self.mne.annotation_segment_colors:
curr_col = self.mne.annotation_segment_colors[curr_descr]
else:
curr_col = None
color = QColorDialog.getColor(_get_color(curr_col), self,
f'Choose color for {curr_descr}!')
if color.isValid():
self.mne.annotation_segment_colors[curr_descr] = color
self._update_regions_colors()
self._update_description_cmbx()
self.mne.overview_bar.update_annotations()
def update_values(self, region):
"""Update spinbox-values from region."""
rgn = region.getRegion()
self.start_bx.setValue(rgn[0])
self.stop_bx.setValue(rgn[1])
def _update_description_cmbx(self):
self.description_cmbx.clear()
descriptions = self.main._get_annotation_labels()
for description in descriptions:
self._add_description_to_cmbx(description)
self.description_cmbx.setCurrentText(self.mne.current_description)
def _update_regions_colors(self):
for region in self.mne.regions:
region.update_color()
def reset(self):
"""Reset to default state."""
if self.description_cmbx.count() > 0:
self.description_cmbx.setCurrentIndex(0)
self.mne.current_description = self.description_cmbx.currentText()
self.start_bx.setValue(0)
self.stop_bx.setValue(0)
def _show_help(self):
info_text = '<h1>Help</h1>' \
'<h2>Annotations</h2>' \
'<h3>Add Annotations</h3>' \
'Drag inside the data-view to create annotations with '\
'the description currently selected (leftmost item of '\
'the toolbar).If there is no description yet, add one ' \
'with the button "Add description".' \
'<h3>Remove Annotations</h3>' \
'You can remove single annotations by right-clicking on '\
'them.' \
'<h3>Edit Annotations</h3>' \
'You can edit annotations by dragging them or their '\
'boundaries. Or you can use the dials in the toolbar to '\
'adjust the boundaries for the current selected '\
'annotation.' \
'<h2>Descriptions</h2>' \
'<h3>Add Description</h3>' \
'Add a new description with ' \
'the button "Add description".' \
'<h3>Edit Description</h3>' \
'You can edit the description of one single annotation '\
'or all annotations of the currently selected kind with '\
'the button "Edit description".' \
'<h3>Remove Description</h3>' \
'You can remove all annotations of the currently '\
'selected kind with the button "Remove description".'
self.main.message_box(text='Annotations-Help',
info_text=info_text,
icon=QMessageBox.Information)
class BrowserView(GraphicsView):
    """Customized View as part of GraphicsView-Framework."""

    def __init__(self, plot, **kwargs):
        """Set *plot* as the central item and enable touch-gestures.

        Parameters
        ----------
        plot : object
            The plot-item to display (presumably a pyqtgraph PlotItem —
            TODO confirm against callers).
        **kwargs
            Forwarded to GraphicsView.
        """
        super().__init__(**kwargs)
        self.setCentralItem(plot)
        # Accept touch-events and register pinch/swipe gestures on the
        # viewport (gesture handling itself is still WIP, see below).
        self.viewport().setAttribute(Qt.WA_AcceptTouchEvents, True)
        self.viewport().grabGesture(Qt.PinchGesture)
        self.viewport().grabGesture(Qt.SwipeGesture)

    # def viewportEvent(self, event):
    #     """Customize viewportEvent for touch-gestures (WIP)."""
    #     if event.type() in [QEvent.TouchBegin, QEvent.TouchUpdate,
    #                         QEvent.TouchEnd]:
    #         if event.touchPoints() == 2:
    #             pass
    #     elif event.type() == QEvent.Gesture:
    #         print('Gesture')
    #     return super().viewportEvent(event)

    def mouseMoveEvent(self, ev):
        """Customize MouseMoveEvent."""
        # Don't set GraphicsView.mouseEnabled to True,
        # we only want part of the functionality pyqtgraph offers here.
        super().mouseMoveEvent(ev)
        # Re-emit the position so listeners (e.g. the browser's
        # _mouse_moved slot) can react to mouse-movement.
        self.sigSceneMouseMoved.emit(ev.pos())
class LoadThread(QThread):
    """A worker object for precomputing in a separate QThread."""
    # Signals to communicate progress/results back to the GUI-thread.
    loadProgress = pyqtSignal(int)
    processText = pyqtSignal(str)
    loadingFinished = pyqtSignal()

    def __init__(self, browser):
        """Connect the worker-signals to the browser's slots.

        Parameters
        ----------
        browser : object
            The browser-instance whose data is to be precomputed
            (must provide ``mne``, ``_show_process`` and
            ``_precompute_finished``).
        """
        super().__init__()
        self.browser = browser
        self.mne = browser.mne
        self.loadProgress.connect(self.mne.load_progressbar.setValue)
        self.processText.connect(self.browser._show_process)
        self.loadingFinished.connect(self.browser._precompute_finished)

    def run(self):
        """Load and process data in a separate QThread."""
        # Split data loading into 10 chunks to show user progress.
        # Testing showed that e.g. n_chunks=100 extends loading time
        # (at least for the sample dataset)
        # because of the frequent gui-update-calls.
        # Thus n_chunks = 10 should suffice.
        data = None
        if self.mne.is_epochs:
            # For epochs the full time-axis is built up-front from the
            # number of epochs and samples per epoch.
            times = np.arange(len(self.mne.inst) * len(self.mne.inst.times)) \
                    / self.mne.info['sfreq']
        else:
            times = None
        n_chunks = min(10, len(self.mne.inst))
        chunk_size = len(self.mne.inst) // n_chunks
        for n in range(n_chunks):
            start = n * chunk_size
            if n == n_chunks - 1:
                # Get last chunk which may be larger due to rounding above
                stop = None
            else:
                stop = start + chunk_size
            # Load epochs
            if self.mne.is_epochs:
                item = slice(start, stop)
                with self.mne.inst.info._unlock():
                    data_chunk = np.concatenate(
                        self.mne.inst.get_data(item=item), axis=-1)
            # Load raw
            else:
                data_chunk, times_chunk = self.browser._load_data(start, stop)
                if times is None:
                    times = times_chunk
                else:
                    times = np.concatenate((times, times_chunk), axis=0)
            if data is None:
                data = data_chunk
            else:
                # Chunks are concatenated along the time-axis.
                data = np.concatenate((data, data_chunk), axis=1)
            self.loadProgress.emit(n + 1)
        picks = self.mne.ch_order
        # Deactive remove dc because it will be removed for visible range
        stashed_remove_dc = self.mne.remove_dc
        self.mne.remove_dc = False
        # NOTE(review): len(data) is the size of the first axis (channels)
        # — confirm that _process_data expects that as `stop` here and not
        # the number of time-samples.
        data = self.browser._process_data(data, 0, len(data), picks, self)
        self.mne.remove_dc = stashed_remove_dc
        self.mne.global_data = data
        self.mne.global_times = times
        # Calculate Z-Scores
        self.processText.emit('Calculating Z-Scores...')
        self.browser._get_zscore(data)
        self.loadingFinished.emit()

    def clean(self):
        """Wait for the thread to finish and break reference-cycles."""
        if self.isRunning():
            wait_time = 10  # max. waiting time in seconds
            logger.info('Waiting for Loading-Thread to finish... '
                        f'(max. {wait_time} sec)')
            self.wait(int(wait_time * 1e3))
        # Disconnect all slots so the browser can be garbage-collected.
        _disconnect(self.loadProgress)
        _disconnect(self.processText)
        _disconnect(self.loadingFinished)
        del self.mne
        del self.browser
class _FastToolTipComboBox(QComboBox):
    """QComboBox that shows its tooltip immediately on mouse-enter.

    The tooltip text is stored on the instance and shown manually in
    ``enterEvent`` instead of relying on Qt's delayed tooltip mechanism.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bugfix: initialize the tooltip so enterEvent() cannot raise
        # AttributeError when the mouse enters before setToolTip() was
        # ever called.
        self.tooltip = ''
        self.setMouseTracking(True)

    def setToolTip(self, tooltip):
        """Store the tooltip text (shown manually in enterEvent)."""
        self.tooltip = tooltip

    def enterEvent(self, event):
        """Show the stored tooltip immediately at the cursor position."""
        QToolTip.showText(event.globalPos(), self.tooltip)
        super().enterEvent(event)
class _PGMetaClass(type(BrowserBase), type(QMainWindow)):
    """Metaclass combining the metaclasses of BrowserBase and QMainWindow.

    This is necessary to prevent a metaclass conflict: the conflict arises
    because QMainWindow and BrowserBase have different (incompatible)
    metaclasses, and a class inheriting from both needs a metaclass that
    derives from both of them.
    """
    pass
# Those are the settings which are stored on each device
# depending on its operating system with QSettings.
qsettings_params = {
    # Antialiasing (works with/without OpenGL). Note: depending on the OS,
    # QSettings may return the stored value as a string ('true'/'false')
    # or int instead of a bool — see the conversion when loading.
    'antialiasing': False,
    # Steps per view (relative to time)
    'scroll_sensitivity': 100,
    # Downsampling-Factor (or 'auto', see SettingsDialog for details)
    'downsampling': 1,
    # Downsampling-Method (see SettingsDialog for details)
    'ds_method': 'peak'
}
def _disconnect(sig):
try:
sig.disconnect()
except TypeError: # if there are no connections, ignore it
pass
class PyQtGraphBrowser(BrowserBase, QMainWindow, metaclass=_PGMetaClass):
"""A PyQtGraph-backend for 2D data browsing."""
gotClosed = pyqtSignal()
def __init__(self, **kwargs):
self.backend_name = 'pyqtgraph'
BrowserBase.__init__(self, **kwargs)
QMainWindow.__init__(self)
# Add to list to keep a reference and avoid premature
# garbage-collection.
_browser_instances.append(self)
if self.mne.window_title is not None:
self.setWindowTitle(self.mne.window_title)
# Initialize attributes which are only used by pyqtgraph, not by
# matplotlib and add them to MNEBrowseParams.
# Exactly one MessageBox for messages to facilitate testing/debugging
self.msg_box = QMessageBox(self)
# MessageBox modality needs to be adapted for tests
# (otherwise test execution blocks)
self.test_mode = False
# A Settings-Dialog
self.mne.fig_settings = None
# Stores decimated data
self.mne.decim_data = None
self.mne.decim_times = None
# Stores ypos for selection-mode
self.mne.selection_ypos_dict = dict()
# Parameters for precomputing
self.mne.enable_precompute = False
self.mne.data_precomputed = False
self._rerun_load_thread = False
# Parameters for overviewbar
self.mne.show_overview_bar = True
self.mne.overview_mode = 'channels'
self.mne.zscore_rgba = None
# Container for traces
self.mne.traces = list()
# Scale-Factor
self.mne.scale_factor = 1
# Stores channel-types for butterfly-mode
self.mne.butterfly_type_order = [tp for tp in
_DATA_CH_TYPES_ORDER_DEFAULT
if tp in self.mne.ch_types]
if self.mne.is_epochs:
# Stores parameters for epochs
self.mne.epoch_dur = np.diff(self.mne.boundary_times[:2])[0]
epoch_idx = np.searchsorted(self.mne.midpoints,
(self.mne.t_start,
self.mne.t_start + self.mne.duration))
self.mne.epoch_idx = np.arange(epoch_idx[0], epoch_idx[1])
# Load from QSettings if available
for qparam in qsettings_params:
default = qsettings_params[qparam]
qvalue = QSettings().value(qparam, defaultValue=default)
# QSettings may alter types depending on OS
if not isinstance(qvalue, type(default)):
try:
qvalue = literal_eval(qvalue)
except (SyntaxError, ValueError):
if qvalue in ['true', 'false']:
qvalue = bool(qvalue)
else:
qvalue = default
setattr(self.mne, qparam, qvalue)
# Initialize channel-colors for faster indexing later
self.mne.ch_color_ref = dict()
for idx, ch_name in enumerate(self.mne.ch_names):
ch_type = self.mne.ch_types[idx]
self.mne.ch_color_ref[ch_name] = self.mne.ch_color_dict[ch_type]
# Initialize epoch colors for faster indexing later
if self.mne.is_epochs:
if self.mne.epoch_colors is None:
self.mne.epoch_color_ref = \
np.repeat([to_rgba_array(c) for c
in self.mne.ch_color_ref.values()],
len(self.mne.inst), axis=1)
else:
self.mne.epoch_color_ref = np.empty((len(self.mne.ch_names),
len(self.mne.inst), 4))
for epo_idx, epo in enumerate(self.mne.epoch_colors):
for ch_idx, color in enumerate(epo):
self.mne.epoch_color_ref[ch_idx, epo_idx] = \
to_rgba_array(color)
# Mark bad epochs
self.mne.epoch_color_ref[:, self.mne.bad_epochs] = \
to_rgba_array(self.mne.epoch_color_bad)
# Mark bad channels
bad_idxs = np.in1d(self.mne.ch_names, self.mne.info['bads'])
self.mne.epoch_color_ref[bad_idxs, :] = \
to_rgba_array(self.mne.ch_color_bad)
# Add Load-Progressbar for loading in a thread
self.mne.load_prog_label = QLabel('Loading...')
self.statusBar().addWidget(self.mne.load_prog_label)
self.mne.load_prog_label.hide()
self.mne.load_progressbar = QProgressBar()
# Set to n_chunks of LoadRunner
self.mne.load_progressbar.setMaximum(10)
self.statusBar().addWidget(self.mne.load_progressbar, stretch=1)
self.mne.load_progressbar.hide()
# A QThread for preloading
self.load_thread = LoadThread(self)
# Create centralWidget and layout
widget = QWidget()
layout = QGridLayout()
# Initialize Axis-Items
self.mne.time_axis = TimeAxis(self.mne)
self.mne.time_axis.setLabel(text='Time', units='s')
self.mne.channel_axis = ChannelAxis(self)
self.mne.viewbox = RawViewBox(self)
# Start precomputing if enabled
self._init_precompute()
# Initialize data (needed in DataTrace.update_data).
self._update_data()
# Initialize Trace-Plot
self.mne.plt = PlotItem(viewBox=self.mne.viewbox,
axisItems={'bottom': self.mne.time_axis,
'left': self.mne.channel_axis})
# Hide AutoRange-Button
self.mne.plt.hideButtons()
# Configure XY-Range
if self.mne.is_epochs:
self.mne.xmax = len(self.mne.inst.times) * len(self.mne.inst) \
/ self.mne.info['sfreq']
else:
self.mne.xmax = self.mne.inst.times[-1]
# Add one empty line as padding at top (y=0).
# Negative Y-Axis to display channels from top.
self.mne.ymax = len(self.mne.ch_order) + 1
self.mne.plt.setLimits(xMin=0, xMax=self.mne.xmax,
yMin=0, yMax=self.mne.ymax)
# Connect Signals from PlotItem
self.mne.plt.sigXRangeChanged.connect(self._xrange_changed)
self.mne.plt.sigYRangeChanged.connect(self._yrange_changed)
# Add traces
for ch_idx in self.mne.picks:
DataTrace(self, ch_idx)
# Initialize Epochs Grid
if self.mne.is_epochs:
grid_pen = mkPen(color='k', width=2, style=Qt.DashLine)
for x_grid in self.mne.boundary_times[1:-1]:
grid_line = InfiniteLine(pos=x_grid,
pen=grid_pen,
movable=False)
self.mne.plt.addItem(grid_line)
# Add events
if getattr(self.mne, 'event_nums', None) is not None:
self.mne.events_visible = True
for ev_time, ev_id in zip(self.mne.event_times,
self.mne.event_nums):
color = self.mne.event_color_dict[ev_id]
event_line = EventLine(ev_time, ev_id, color)
self.mne.event_lines.append(event_line)
if 0 < ev_time < self.mne.duration:
self.mne.plt.addItem(event_line)
else:
self.mne.events_visible = False
# Add Scale-Bars
self._add_scalebars()
# Check for OpenGL
if self.mne.use_opengl is None: # default: opt-in
self.mne.use_opengl = (
get_config('MNE_BROWSE_USE_OPENGL', '').lower() == 'true')
# Epochs currently only work with OpenGL enabled
# (https://github.com/mne-tools/mne-qt-browser/issues/53)
mac_epochs = self.mne.is_epochs and sys.platform == 'darwin'
if mac_epochs:
self.mne.use_opengl = True
if self.mne.use_opengl:
try:
import OpenGL
except (ModuleNotFoundError, ImportError):
warn('PyOpenGL was not found and OpenGL can\'t be used!\n'
'Consider installing pyopengl with pip or conda'
'or set "use_opengl" to False to avoid this warning.')
if mac_epochs:
warn('Plotting epochs on MacOS without OpenGL'
'may be unstable!')
self.mne.use_opengl = False
else:
logger.info(
f'Using pyopengl with version {OpenGL.__version__}')
# Initialize BrowserView (inherits QGraphicsView)
self.mne.view = BrowserView(self.mne.plt,
useOpenGL=self.mne.use_opengl,
background='w')
if hasattr(self.mne, 'bgcolor'):
bgcolor = self.mne.bgcolor
else:
bgcolor = 'w'
self.mne.view.setBackground(_get_color(bgcolor))
layout.addWidget(self.mne.view, 0, 0)
# Initialize Scroll-Bars
self.mne.ax_hscroll = TimeScrollBar(self.mne)
layout.addWidget(self.mne.ax_hscroll, 1, 0, 1, 2)
self.mne.ax_vscroll = ChannelScrollBar(self.mne)
layout.addWidget(self.mne.ax_vscroll, 0, 1)
# Initialize VLine
self.mne.vline = None
self.mne.vline_visible = False
# Initialize crosshair (as in pyqtgraph example)
self.mne.crosshair_enabled = False
self.mne.crosshair_h = None
self.mne.crosshair = None
self.mne.view.sigSceneMouseMoved.connect(self._mouse_moved)
# Initialize Annotation-Widgets
self.mne.annotation_mode = False
if not self.mne.is_epochs:
self._init_annot_mode()
# OverviewBar
self.mne.overview_bar = OverviewBar(self)
layout.addWidget(self.mne.overview_bar, 2, 0, 1, 2)
# Add Combobox to select Overview-Mode
self.overview_mode_chkbx = _FastToolTipComboBox()
self.overview_mode_chkbx.addItems(['empty', 'channels'])
tooltip = (
'<h2>Overview-Modes</h2>'
'<ul>'
'<li>empty:<br>'
'Display no background.</li>'
'<li>channels:<br>'
'Display each channel with its channel-type color.</li>'
'<li>zscore:<br>'
'Display the zscore for the data from each channel across time. '
'Red indicates high zscores, blue indicates low zscores, '
'and the boundaries of the color gradient are defined by the '
'minimum/maximum zscore.'
'This only works if precompute is set to "True", or if it is '
'enabled with "auto" and enough free RAM is available.</li>'
'</ul>')
self.overview_mode_chkbx.setToolTip(tooltip)
if self.mne.enable_precompute:
self.overview_mode_chkbx.addItems(['zscore'])
self.overview_mode_chkbx.setCurrentText(self.mne.overview_mode)
self.overview_mode_chkbx.currentTextChanged.connect(
self._overview_mode_changed)
# Avoid taking keyboard-focus
self.overview_mode_chkbx.setFocusPolicy(Qt.NoFocus)
overview_mode_layout = QHBoxLayout()
overview_mode_layout.addWidget(QLabel('Overview-Mode:'))
overview_mode_layout.addWidget(self.overview_mode_chkbx)
overview_mode_widget = QWidget()
overview_mode_widget.setLayout(overview_mode_layout)
self.statusBar().addPermanentWidget(overview_mode_widget)
widget.setLayout(layout)
self.setCentralWidget(widget)
# Initialize Selection-Dialog
if getattr(self.mne, 'group_by', None) in ['position', 'selection']:
self._create_selection_fig()
# Initialize Projectors-Dialog if show_options=True
if getattr(self.mne, 'show_options', False):
self._toggle_proj_fig()
# Initialize Toolbar
self.mne.toolbar = self.addToolBar('Tools')
self.mne.toolbar.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
adecr_time = QAction(QIcon(":/less_time.svg"), '- Time', parent=self)
adecr_time.triggered.connect(partial(self.change_duration, -0.2))
self.mne.toolbar.addAction(adecr_time)
aincr_time = QAction(QIcon(":/more_time.svg"), '+ Time', parent=self)
aincr_time.triggered.connect(partial(self.change_duration, 0.25))
self.mne.toolbar.addAction(aincr_time)
adecr_nchan = QAction(QIcon(":/less_channels.svg"), '- Channels',
parent=self)
adecr_nchan.triggered.connect(partial(self.change_nchan, -10))
self.mne.toolbar.addAction(adecr_nchan)
aincr_nchan = QAction(QIcon(":/more_channels.svg"), '+ Channels',
parent=self)
aincr_nchan.triggered.connect(partial(self.change_nchan, 10))
self.mne.toolbar.addAction(aincr_nchan)
adecr_nchan = QAction(QIcon(":/zoom_out.svg"), 'Zoom Out', parent=self)
adecr_nchan.triggered.connect(partial(self.scale_all, 4 / 5))
self.mne.toolbar.addAction(adecr_nchan)
aincr_nchan = QAction(QIcon(":/zoom_in.svg"), 'Zoom In', parent=self)
aincr_nchan.triggered.connect(partial(self.scale_all, 5 / 4))
self.mne.toolbar.addAction(aincr_nchan)
if not self.mne.is_epochs:
atoggle_annot = QAction(QIcon(":/annotations.svg"), 'Annotations',
parent=self)
atoggle_annot.triggered.connect(self._toggle_annotation_fig)
self.mne.toolbar.addAction(atoggle_annot)
atoggle_proj = QAction(QIcon(":/ssp.svg"), 'SSP', parent=self)
atoggle_proj.triggered.connect(self._toggle_proj_fig)
self.mne.toolbar.addAction(atoggle_proj)
atoggle_fullscreen = QAction(QIcon(":/fullscreen.svg"), 'Fullscreen',
parent=self)
atoggle_fullscreen.triggered.connect(self._toggle_fullscreen)
self.mne.toolbar.addAction(atoggle_fullscreen)
asettings = QAction(QIcon(":/settings.svg"), 'Settings',
parent=self)
asettings.triggered.connect(self._toggle_settings_fig)
self.mne.toolbar.addAction(asettings)
ahelp = QAction(QIcon(":/help.svg"), 'Help', parent=self)
ahelp.triggered.connect(self._toggle_help_fig)
self.mne.toolbar.addAction(ahelp)
# Set Start-Range (after all necessary elements are initialized)
self.mne.plt.setXRange(self.mne.t_start,
self.mne.t_start + self.mne.duration,
padding=0)
if self.mne.butterfly:
self._set_butterfly(True)
else:
self.mne.plt.setYRange(0, self.mne.n_channels + 1, padding=0)
# Set Size
width = int(self.mne.figsize[0] * self.logicalDpiX())
height = int(self.mne.figsize[1] * self.logicalDpiY())
self.resize(width, height)
# Initialize Keyboard-Shortcuts
is_mac = platform.system() == 'Darwin'
dur_keys = ('fn + ←', 'fn + →') if is_mac else ('Home', 'End')
ch_keys = ('fn + ↑', 'fn + ↓') if is_mac else ('Page up', 'Page down')
hscroll_type = '1 epoch' if self.mne.is_epochs else '¼ page'
self.mne.keyboard_shortcuts = {
'left': {
'alias': '←',
'qt_key': Qt.Key_Left,
'modifier': [None, 'Shift'],
'slot': [self.hscroll],
'parameter': [-40, '-full'],
'description': [f'Scroll left ({hscroll_type})',
'Scroll left (full page)']
},
'right': {
'alias': '→',
'qt_key': Qt.Key_Right,
'modifier': [None, 'Shift'],
'slot': [self.hscroll],
'parameter': [40, '+full'],
'description': [f'Scroll right ({hscroll_type})',
'Scroll right (full page)']
},
'up': {
'alias': '↑',
'qt_key': Qt.Key_Up,
'slot': [self.vscroll],
'parameter': ['-full'],
'description': ['Scroll up (full page)']
},
'down': {
'alias': '↓',
'qt_key': Qt.Key_Down,
'slot': [self.vscroll],
'parameter': ['+full'],
'description': ['Scroll down (full page)']
},
'home': {
'alias': dur_keys[0],
'qt_key': Qt.Key_Home,
'slot': [self.change_duration],
'parameter': [-0.2],
'description': [f'Decrease duration ({hscroll_type})']
},
'end': {
'alias': dur_keys[1],
'qt_key': Qt.Key_End,
'slot': [self.change_duration],
'parameter': [0.25],
'description': [f'Increase duration ({hscroll_type})']
},
'pagedown': {
'alias': ch_keys[0],
'qt_key': Qt.Key_PageDown,
'modifier': [None, 'Shift'],
'slot': [self.change_nchan],
'parameter': [-1, -10],
'description': ['Decrease shown channels (1)',
'Decrease shown channels (10)']
},
'pageup': {
'alias': ch_keys[1],
'qt_key': Qt.Key_PageUp,
'modifier': [None, 'Shift'],
'slot': [self.change_nchan],
'parameter': [1, 10],
'description': ['Increase shown channels (1)',
'Increase shown channels (10)']
},
'-': {
'qt_key': Qt.Key_Minus,
'slot': [self.scale_all],
'parameter': [4 / 5],
'description': ['Decrease Scale']
},
'+': {
'qt_key': Qt.Key_Plus,
'slot': [self.scale_all],
'parameter': [5 / 4],
'description': ['Increase Scale']
},
'=': {
'qt_key': Qt.Key_Equal,
'slot': [self.scale_all],
'parameter': [5 / 4],
'description': ['Increase Scale']
},
'a': {
'qt_key': Qt.Key_A,
'slot': [self._toggle_annotation_fig,
self._toggle_annotations],
'modifier': [None, 'Shift'],
'description': ['Toggle Annotation-Tool',
'Toggle Annotations visible']
},
'b': {
'qt_key': Qt.Key_B,
'slot': [self._toggle_butterfly],
'description': ['Toggle Butterfly']
},
'd': {
'qt_key': Qt.Key_D,
'slot': [self._toggle_dc],
'description': ['Toggle DC-Correction']
},
'e': {
'qt_key': Qt.Key_E,
'slot': [self._toggle_events],
'description': ['Toggle Events visible']
},
'h': {
'qt_key': Qt.Key_H,
'slot': [self._toggle_epoch_histogram],
'description': ['Toggle Epoch-Histogram']
},
'j': {
'qt_key': Qt.Key_J,
'slot': [self._toggle_proj_fig,
self._toggle_all_projs],
'modifier': [None, 'Shift'],
'description': ['Toggle Projection Figure',
'Toggle all projections']
},
'l': {
'qt_key': Qt.Key_L,
'slot': [self._toggle_antialiasing],
'description': ['Toggle Antialiasing']
},
'o': {
'qt_key': Qt.Key_O,
'slot': [self._toggle_overview_bar],
'description': ['Toggle Overview-Bar']
},
't': {
'qt_key': Qt.Key_T,
'slot': [self._toggle_time_format],
'description': ['Toggle Time-Format']
},
's': {
'qt_key': Qt.Key_S,
'slot': [self._toggle_scalebars],
'description': ['Toggle Scalebars']
},
'w': {
'qt_key': Qt.Key_W,
'slot': [self._toggle_whitening],
'description': ['Toggle Whitening']
},
'x': {
'qt_key': Qt.Key_X,
'slot': [self._toggle_crosshair],
'description': ['Toggle Crosshair']
},
'z': {
'qt_key': Qt.Key_Z,
'slot': [self._toggle_zenmode],
'description': ['Toggle Zen-Mode']
},
'?': {
'qt_key': Qt.Key_Question,
'slot': [self._toggle_help_fig],
'description': ['Show Help']
},
'f11': {
'qt_key': Qt.Key_F11,
'slot': [self._toggle_fullscreen],
'description': ['Toggle Full-Screen']
},
'escape': {
'qt_key': Qt.Key_Escape,
'slot': [self.close],
'description': ['Close']
},
# Just for testing
'enter': {
'qt_key': Qt.Key_Enter
},
' ': {
'qt_key': Qt.Key_Space
}
}
    def _update_yaxis_labels(self):
        """Trigger a repaint of the channel-axis to refresh its labels."""
        self.mne.channel_axis.repaint()
def _add_scalebars(self):
"""Add scalebars for all channel-types.
(scene handles showing them in when in view
range)
"""
self.mne.scalebars.clear()
# To keep order (np.unique sorts)
ordered_types = self.mne.ch_types[self.mne.ch_order]
unique_type_idxs = np.unique(ordered_types,
return_index=True)[1]
ch_types_ordered = [ordered_types[idx] for idx
in sorted(unique_type_idxs)]
for ch_type in [ct for ct in ch_types_ordered
if ct != 'stim' and
ct in self.mne.scalings and
ct in getattr(self.mne, 'units', {}) and
ct in getattr(self.mne, 'unit_scalings', {})]:
scale_bar = ScaleBar(self.mne, ch_type)
self.mne.scalebars[ch_type] = scale_bar
self.mne.plt.addItem(scale_bar)
scale_bar_text = ScaleBarText(self.mne, ch_type)
self.mne.scalebar_texts[ch_type] = scale_bar_text
self.mne.plt.addItem(scale_bar_text)
self._set_scalebars_visible(self.mne.scalebars_visible)
def _update_scalebar_x_positions(self):
if self.mne.scalebars_visible:
for scalebar in self.mne.scalebars.values():
scalebar.update_x_position()
for scalebar_text in self.mne.scalebar_texts.values():
scalebar_text.update_x_position()
def _update_scalebar_y_positions(self):
if self.mne.scalebars_visible:
for scalebar in self.mne.scalebars.values():
scalebar.update_y_position()
for scalebar_text in self.mne.scalebar_texts.values():
scalebar_text.update_y_position()
def _update_scalebar_values(self):
for scalebar_text in self.mne.scalebar_texts.values():
scalebar_text.update_value()
def _set_scalebars_visible(self, visible):
for scalebar in self.mne.scalebars.values():
scalebar.setVisible(visible)
for scalebar_text in self.mne.scalebar_texts.values():
scalebar_text.setVisible(visible)
self._update_scalebar_y_positions()
def _toggle_scalebars(self):
self.mne.scalebars_visible = not self.mne.scalebars_visible
self._set_scalebars_visible(self.mne.scalebars_visible)
    def _overview_mode_changed(self, new_mode):
        """Slot for the overview-mode combobox; apply the selected mode."""
        self.mne.overview_mode = new_mode
        if self.mne.overview_mode == 'zscore':
            # Busy-wait until the z-score RGBA image has been precomputed
            # (by LoadThread); processEvents keeps the GUI responsive.
            while self.mne.zscore_rgba is None:
                QApplication.processEvents()
        self.mne.overview_bar.set_background()
def scale_all(self, step):
"""Scale all traces by multiplying with step."""
self.mne.scale_factor *= step
# Reapply clipping if necessary
if self.mne.clipping is not None:
self._update_data()
# Scale Traces (by scaling the Item, not the data)
for line in self.mne.traces:
line.update_scale()
# Update Scalebars
self._update_scalebar_values()
def hscroll(self, step):
"""Scroll horizontally by step."""
if step == '+full':
rel_step = self.mne.duration
elif step == '-full':
rel_step = - self.mne.duration
elif self.mne.is_epochs:
direction = 1 if step > 0 else -1
rel_step = direction * self.mne.duration / self.mne.n_epochs
else:
rel_step = step * self.mne.duration / self.mne.scroll_sensitivity
# Get current range and add step to it
xmin, xmax = [i + rel_step for i in self.mne.viewbox.viewRange()[0]]
if xmin < 0:
xmin = 0
xmax = xmin + self.mne.duration
elif xmax > self.mne.xmax:
xmax = self.mne.xmax
xmin = xmax - self.mne.duration
self.mne.plt.setXRange(xmin, xmax, padding=0)
    def vscroll(self, step):
        """Scroll vertically by step."""
        if self.mne.fig_selection is not None:
            # With a selection-dialog, scrolling moves between selections.
            if step == '+full':
                step = 1
            elif step == '-full':
                step = -1
            else:
                step = int(step)
            self.mne.fig_selection._scroll_selection(step)
        elif self.mne.butterfly:
            # All channels are overlaid in butterfly-mode; nothing to scroll.
            return
        else:
            # Get current range and add step to it
            if step == '+full':
                step = self.mne.n_channels
            elif step == '-full':
                step = - self.mne.n_channels
            ymin, ymax = [i + step for i in self.mne.viewbox.viewRange()[1]]
            # Clamp to limits while keeping n_channels shown.
            if ymin < 0:
                ymin = 0
                ymax = self.mne.n_channels + 1
            elif ymax > self.mne.ymax:
                ymax = self.mne.ymax
                ymin = ymax - self.mne.n_channels - 1
            self.mne.plt.setYRange(ymin, ymax, padding=0)
    def change_duration(self, step):
        """Change duration by step."""
        xmin, xmax = self.mne.viewbox.viewRange()[0]
        if self.mne.is_epochs:
            # use the length of one epoch as duration change
            min_dur = len(self.mne.inst.times) / self.mne.info['sfreq']
            step_dir = (1 if step > 0 else -1)
            rel_step = min_dur * step_dir
            self.mne.n_epochs = np.clip(self.mne.n_epochs + step_dir,
                                        1, len(self.mne.inst))
        else:
            # never show fewer than 3 samples
            min_dur = 3 * np.diff(self.mne.inst.times[:2])[0]
            rel_step = self.mne.duration * step
        xmax += rel_step
        # Enforce the minimal duration.
        if xmax - xmin < min_dur:
            xmax = xmin + min_dur
        # If the right edge ran past the data, shift the window left.
        if xmax > self.mne.xmax:
            diff = xmax - self.mne.xmax
            xmax = self.mne.xmax
            xmin -= diff
        if xmin < 0:
            xmin = 0
        self.mne.ax_hscroll.update_duration()
        self.mne.plt.setXRange(xmin, xmax, padding=0)
    def change_nchan(self, step):
        """Change number of channels by step."""
        # In butterfly-mode all channels are shown; nothing to change.
        if not self.mne.butterfly:
            if step == '+full':
                step = self.mne.n_channels
            elif step == '-full':
                step = - self.mne.n_channels
            ymin, ymax = self.mne.viewbox.viewRange()[1]
            ymax += step
            if ymax > self.mne.ymax:
                ymax = self.mne.ymax
                ymin -= step
            if ymin < 0:
                ymin = 0
            # Keep at least one channel visible (range spans channels + 1).
            if ymax - ymin <= 2:
                ymax = ymin + 2
            self.mne.ax_vscroll.update_nchan()
            self.mne.plt.setYRange(ymin, ymax, padding=0)
def _remove_vline(self):
if self.mne.vline is not None:
if self.mne.is_epochs:
for vline in self.mne.vline:
self.mne.plt.removeItem(vline)
else:
self.mne.plt.removeItem(self.mne.vline)
self.mne.vline = None
self.mne.vline_visible = False
self.mne.overview_bar.update_vline()
def _get_vline_times(self, t):
rel_time = t % self.mne.epoch_dur
abs_time = self.mne.times[0]
ts = np.arange(
self.mne.n_epochs) * self.mne.epoch_dur + abs_time + rel_time
return ts
def _vline_slot(self, orig_vline):
if self.mne.is_epochs:
ts = self._get_vline_times(orig_vline.value())
for vl, xt in zip(self.mne.vline, ts):
if vl != orig_vline:
vl.setPos(xt)
self.mne.overview_bar.update_vline()
    def _add_vline(self, t):
        """Add (or move) the vertical time-line(s) at time ``t``."""
        if self.mne.is_epochs:
            # One vline per shown epoch, all at the same relative time.
            ts = self._get_vline_times(t)
            # Add vline if None
            if self.mne.vline is None:
                self.mne.vline = list()
                for xt in ts:
                    # NOTE(review): upper clip bound of len(inst) looks like
                    # it should be len(inst) - 1 so the slice below always
                    # yields two values — confirm.
                    epo_idx = np.clip(
                        np.searchsorted(self.mne.boundary_times, xt) - 1,
                        0, len(self.mne.inst))
                    bmin, bmax = self.mne.boundary_times[epo_idx:epo_idx + 2]
                    # Avoid off-by-one-error at bmax for VlineLabel
                    bmax -= 1 / self.mne.info['sfreq']
                    vl = VLine(self.mne, xt, bounds=(bmin, bmax))
                    # Should only be emitted when dragged
                    vl.sigPositionChangeFinished.connect(self._vline_slot)
                    self.mne.vline.append(vl)
                    self.mne.plt.addItem(vl)
            else:
                for vl, xt in zip(self.mne.vline, ts):
                    vl.setPos(xt)
        else:
            if self.mne.vline is None:
                self.mne.vline = VLine(self.mne, t, bounds=(0, self.mne.xmax))
                self.mne.vline.sigPositionChangeFinished.connect(
                    self._vline_slot)
                self.mne.plt.addItem(self.mne.vline)
            else:
                self.mne.vline.setPos(t)
        self.mne.vline_visible = True
        self.mne.overview_bar.update_vline()
    def _mouse_moved(self, pos):
        """Show Crosshair if enabled at mouse move."""
        if self.mne.crosshair_enabled:
            if self.mne.plt.sceneBoundingRect().contains(pos):
                mousePoint = self.mne.viewbox.mapSceneToView(pos)
                x, y = mousePoint.x(), mousePoint.y()
                if (0 <= x <= self.mne.xmax and
                        0 <= y <= self.mne.ymax):
                    # Lazily create the crosshair item on first use.
                    if not self.mne.crosshair:
                        self.mne.crosshair = Crosshair()
                        self.mne.plt.addItem(self.mne.crosshair,
                                             ignoreBounds=True)
                    # Get ypos from trace
                    # (snap only when exactly one trace is under the cursor)
                    trace = [tr for tr in self.mne.traces if
                             tr.ypos - 0.5 < y < tr.ypos + 0.5]
                    if len(trace) == 1:
                        trace = trace[0]
                        idx = np.searchsorted(self.mne.times, x)
                        if self.mne.data_precomputed:
                            data = self.mne.data[trace.order_idx]
                        else:
                            data = self.mne.data[trace.range_idx]
                        yvalue = data[idx]
                        yshown = yvalue + trace.ypos
                        self.mne.crosshair.set_data(x, yshown)
                        # relative x for epochs
                        if self.mne.is_epochs:
                            rel_idx = idx % len(self.mne.inst.times)
                            x = self.mne.inst.times[rel_idx]
                        # negative because plot is inverted for Y
                        scaler = -1 if self.mne.butterfly else -2
                        inv_norm = (scaler *
                                    self.mne.scalings[trace.ch_type] *
                                    self.mne.unit_scalings[trace.ch_type] /
                                    self.mne.scale_factor)
                        label = f'{_simplify_float(yvalue * inv_norm)} ' \
                                f'{self.mne.units[trace.ch_type]}'
                        self.statusBar().showMessage(f'x={x:.3f} s, '
                                                     f'y={label}')
def _toggle_crosshair(self):
self.mne.crosshair_enabled = not self.mne.crosshair_enabled
if self.mne.crosshair:
self.mne.plt.removeItem(self.mne.crosshair)
self.mne.crosshair = None
    def _xrange_changed(self, _, xrange):
        """React to a changed x-view-range (time axis)."""
        # Update data
        if self.mne.is_epochs:
            # Remember vline position relative to its epoch so it can be
            # restored after the epoch-selection changed.
            if self.mne.vline is not None:
                rel_vl_t = self.mne.vline[0].value() \
                           - self.mne.boundary_times[self.mne.epoch_idx][0]
            # Depends on only allowing xrange showing full epochs
            boundary_idxs = np.searchsorted(self.mne.midpoints, xrange)
            self.mne.epoch_idx = np.arange(*boundary_idxs)
            # Update colors
            for trace in self.mne.traces:
                trace.update_color()
            # Update vlines
            if self.mne.vline is not None:
                for bmin, bmax, vl in zip(self.mne.boundary_times[
                                              self.mne.epoch_idx],
                                          self.mne.boundary_times[
                                              self.mne.epoch_idx + 1],
                                          self.mne.vline):
                    # Avoid off-by-one-error at bmax for VlineLabel
                    bmax -= 1 / self.mne.info['sfreq']
                    vl.setBounds((bmin, bmax))
                    vl.setValue(bmin + rel_vl_t)
        self.mne.t_start = xrange[0]
        self.mne.duration = xrange[1] - xrange[0]
        self._redraw(update_data=True)
        # Update annotations
        if not self.mne.is_epochs:
            self._update_annotations_xrange(xrange)
        # Update Events
        self._update_events_xrange(xrange)
        # Update Time-Bar
        self.mne.ax_hscroll.update_value(xrange[0])
        # Update Overview-Bar
        self.mne.overview_bar.update_viewrange()
        # Update Scalebars
        self._update_scalebar_x_positions()
def _update_events_xrange(self, xrange):
"""Add or remove event-lines depending on view-range.
This has proven to be more performant (and scalable)
than adding all event-lines to plt(the Scene)
and letting pyqtgraph/Qt handle it.
"""
if self.mne.events_visible:
for ev_line in self.mne.event_lines:
if xrange[0] < ev_line.pos().x() < xrange[1]:
if ev_line not in self.mne.plt.items:
self.mne.plt.addItem(ev_line)
else:
if ev_line in self.mne.plt.items:
self.mne.plt.removeItem(ev_line)
    def _update_annotations_xrange(self, xrange):
        """Add or remove annotation-regions depending on view-range.

        This has proven to be more performant (and scalable)
        than adding all annotations to plt(the Scene)
        and letting pyqtgraph/Qt handle it.
        """
        if self.mne.annotations_visible:
            for region in self.mne.regions:
                if self.mne.visible_annotations[region.description]:
                    rmin, rmax = region.getRegion()
                    xmin, xmax = xrange
                    comparisons = [rmin < xmin,
                                   rmin < xmax,
                                   rmax < xmin,
                                   rmax < xmax]
                    # All True -> region entirely left of the view-range;
                    # all False -> entirely right of it. A mixed result
                    # means the region overlaps the view and must be shown.
                    if all(comparisons) or not any(comparisons):
                        if region in self.mne.plt.items:
                            self.mne.plt.removeItem(region)
                            self.mne.plt.removeItem(region.label_item)
                    else:
                        if region not in self.mne.plt.items:
                            self.mne.plt.addItem(region)
                            self.mne.plt.addItem(region.label_item)
    def _yrange_changed(self, _, yrange):
        """React to a changed y-view-range (channel axis)."""
        if not self.mne.butterfly:
            if not self.mne.fig_selection:
                # Update picks and data
                self.mne.ch_start = np.clip(round(yrange[0]), 0,
                                            len(self.mne.ch_order)
                                            - self.mne.n_channels)
                self.mne.n_channels = round(yrange[1] - yrange[0] - 1)
                self._update_picks()
                # Update Channel-Bar
                self.mne.ax_vscroll.update_value(self.mne.ch_start)
            self._update_data()
        # Update Overview-Bar
        self.mne.overview_bar.update_viewrange()
        # Update Scalebars
        self._update_scalebar_y_positions()
        # Traces whose channel scrolled out of view ...
        off_traces = [tr for tr in self.mne.traces
                      if tr.ch_idx not in self.mne.picks]
        # ... and channels that newly scrolled into view.
        add_idxs = [p for p in self.mne.picks
                    if p not in [tr.ch_idx for tr in self.mne.traces]]
        # Update range_idx for traces which just shifted in y-position
        for trace in [tr for tr in self.mne.traces if tr not in off_traces]:
            trace.update_range_idx()
        # Update number of traces.
        trace_diff = len(self.mne.picks) - len(self.mne.traces)
        # Remove unnecessary traces.
        if trace_diff < 0:
            # Only remove from traces not in picks.
            remove_traces = off_traces[:abs(trace_diff)]
            for trace in remove_traces:
                trace.remove()
                off_traces.remove(trace)
        # Add new traces if necessary.
        if trace_diff > 0:
            # Make copy to avoid skipping iteration.
            idxs_copy = add_idxs.copy()
            for aidx in idxs_copy[:trace_diff]:
                DataTrace(self, aidx)
                add_idxs.remove(aidx)
        # Update data of traces outside of yrange (reuse remaining trace-items)
        for trace, ch_idx in zip(off_traces, add_idxs):
            trace.set_ch_idx(ch_idx)
            trace.update_color()
            trace.update_data()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DATA HANDLING
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    def _apply_downsampling(self):
        """
        Get ds-factor and apply ds with one of multiple methods.

        The methods are taken from PlotDataItem in pyqtgraph
        and adjusted to multi-channel data.
        """
        # Get Downsampling-Factor
        # Auto-Downsampling from pyqtgraph
        if self.mne.downsampling == 'auto':
            ds = 1
            if all([hasattr(self.mne, a) for a in ['viewbox', 'times']]):
                vb = self.mne.viewbox
                if vb is not None:
                    view_range = vb.viewRect()
                else:
                    view_range = None
                if view_range is not None and len(self.mne.times) > 1:
                    # dx = sample-spacing in seconds
                    dx = float(self.mne.times[-1] - self.mne.times[0]) / (
                            len(self.mne.times) - 1)
                    if dx != 0.0:
                        x0 = view_range.left() / dx
                        x1 = view_range.right() / dx
                        width = vb.width()
                        if width != 0.0:
                            # Auto-Downsampling with 5 samples per pixel
                            ds = int(max(1, (x1 - x0) / (width * 5)))
        else:
            ds = self.mne.downsampling
        # Apply Downsampling
        if ds not in [None, 1]:
            times = self.mne.times
            data = self.mne.data
            n_ch = data.shape[0]
            if self.mne.ds_method == 'subsample':
                # Take every ds-th sample.
                times = times[::ds]
                data = data[:, ::ds]
            elif self.mne.ds_method == 'mean':
                # Average each bin of ds samples.
                n = len(times) // ds
                # start of x-values
                # try to select a somewhat centered point
                stx = ds // 2
                times = times[stx:stx + n * ds:ds]
                rs_data = data[:, :n * ds].reshape(n_ch, n, ds)
                data = rs_data.mean(axis=2)
            elif self.mne.ds_method == 'peak':
                # Keep min and max of each bin (preserves extrema).
                n = len(times) // ds
                # start of x-values
                # try to select a somewhat centered point
                stx = ds // 2
                x1 = np.empty((n, 2))
                x1[:] = times[stx:stx + n * ds:ds, np.newaxis]
                times = x1.reshape(n * 2)
                y1 = np.empty((n_ch, n, 2))
                y2 = data[:, :n * ds].reshape((n_ch, n, ds))
                y1[:, :, 0] = y2.max(axis=2)
                y1[:, :, 1] = y2.min(axis=2)
                data = y1.reshape((n_ch, n * 2))
            self.mne.times, self.mne.data = times, data
def _show_process(self, message):
if self.mne.load_progressbar.isVisible():
self.mne.load_progressbar.hide()
self.mne.load_prog_label.hide()
self.statusBar().showMessage(message)
def _precompute_finished(self):
self.statusBar().showMessage('Loading Finished', 5)
self.mne.data_precomputed = True
if self.mne.overview_mode == 'zscore':
# Show loaded overview image
self.mne.overview_bar.set_background()
if self._rerun_load_thread:
self._rerun_load_thread = False
self._init_precompute()
    def _init_precompute(self):
        """Start (re-)precomputation of the full data in the load-thread."""
        # Remove previously loaded data
        self.mne.data_precomputed = False
        if all([hasattr(self.mne, st)
                for st in ['global_data', 'global_times']]):
            del self.mne.global_data, self.mne.global_times
        gc.collect()
        if self.mne.precompute == 'auto':
            # Decide based on available RAM vs expected data-size.
            self.mne.enable_precompute = self._check_space_for_precompute()
        elif isinstance(self.mne.precompute, bool):
            self.mne.enable_precompute = self.mne.precompute
        if self.mne.enable_precompute:
            # Start precompute thread
            self.mne.load_progressbar.show()
            self.mne.load_prog_label.show()
            self.load_thread.start()
def _rerun_precompute(self):
if self.load_thread.isRunning():
self._rerun_load_thread = True
else:
self._init_precompute()
def _check_space_for_precompute(self):
try:
import psutil
except ImportError:
logger.info('Free RAM space could not be determined because'
'"psutil" is not installed. '
'Setting precompute to False.')
return False
else:
if self.mne.is_epochs:
files = [self.mne.inst.filename]
else:
files = self.mne.inst.filenames
if files[0] is not None:
# Get disk-space of raw-file(s)
disk_space = 0
for fn in files:
disk_space += getsize(fn)
# Determine expected RAM space based on orig_format
fmt_multipliers = {'double': 1,
'single': 2,
'int': 2,
'short': 4}
# Epochs and ICA don't have this attribute, assume single
# on disk
fmt = getattr(self.mne.inst, 'orig_format', 'single')
# Apply size change to 64-bit float in memory
# (* 2 because when loading data will be loaded into a copy
# of self.mne.inst._data to apply processing.
expected_ram = disk_space * fmt_multipliers[fmt] * 2
else:
expected_ram = sys.getsizeof(self.mne.inst._data)
# Get available RAM
free_ram = psutil.virtual_memory().free
expected_ram_str = sizeof_fmt(expected_ram)
free_ram_str = sizeof_fmt(free_ram)
left_ram_str = sizeof_fmt(free_ram - expected_ram)
if expected_ram < free_ram:
logger.debug('The data precomputed for visualization takes '
f'{expected_ram_str} with {left_ram_str} of '
f'RAM left.')
return True
else:
logger.debug(f'The precomputed data with {expected_ram_str} '
f'will surpass your current {free_ram_str} '
f'of free RAM.\n'
'Thus precompute will be set to False.\n'
'(If you want to precompute nevertheless, '
'then set precompute to True instead of "auto")')
return False
    def _process_data(self, data, start, stop, picks,
                      signals=None):
        """Process the data-chunk like the base-class, then invert it."""
        data = super()._process_data(data, start, stop, picks, signals)
        # Invert Data to be displayed from top on inverted Y-Axis
        data *= -1
        return data
    def _update_data(self):
        """Refresh self.mne.times/data for the currently shown range."""
        if self.mne.data_precomputed:
            # get start/stop-samples
            start, stop = self._get_start_stop()
            self.mne.times = self.mne.global_times[start:stop]
            self.mne.data = self.mne.global_data[:, start:stop]
            # remove DC locally
            if self.mne.remove_dc:
                self.mne.data = self.mne.data - \
                                self.mne.data.mean(axis=1, keepdims=True)
        else:
            # While data is not precomputed get data only from shown range and
            # process only those.
            super()._update_data()
        # Initialize decim
        self.mne.decim_data = np.ones_like(self.mne.picks)
        data_picks_mask = np.in1d(self.mne.picks, self.mne.picks_data)
        self.mne.decim_data[data_picks_mask] = self.mne.decim
        # Get decim_times
        if self.mne.decim != 1:
            # decim can vary by channel type,
            # so compute different `times` vectors.
            self.mne.decim_times = {decim_value: self.mne.times[::decim_value]
                                    + self.mne.first_time for decim_value
                                    in set(self.mne.decim_data)}
        # Apply clipping
        if self.mne.clipping == 'clamp':
            self.mne.data = np.clip(self.mne.data, -0.5, 0.5)
        elif self.mne.clipping is not None:
            # Copy before masking so the global data stays untouched.
            self.mne.data = self.mne.data.copy()
            self.mne.data[abs(self.mne.data * self.mne.scale_factor)
                          > self.mne.clipping] = np.nan
        # Apply Downsampling (if enabled)
        self._apply_downsampling()
    def _get_zscore(self, data):
        """Compute an RGBA z-score image of *data* for the overview-bar."""
        # Reshape data to reasonable size for display
        if QApplication.desktop() is None:
            max_pixel_width = 3840  # default=UHD
        else:
            max_pixel_width = QApplication.desktop().screenGeometry().width()
        collapse_by = data.shape[1] // max_pixel_width
        data = data[:, :max_pixel_width * collapse_by]
        if collapse_by > 0:
            # Average consecutive samples down to one value per pixel.
            data = data.reshape(data.shape[0], max_pixel_width, collapse_by)
            data = data.mean(axis=2)
        z = zscore(data, axis=1)
        if z.size > 0:
            zmin = np.min(z, axis=1)
            zmax = np.max(z, axis=1)
            # Convert into RGBA
            # (negative z -> blue, positive z -> red; alpha scales with
            # magnitude relative to the row's extremum)
            zrgba = np.empty((*z.shape, 4))
            for row_idx, row in enumerate(z):
                for col_idx, value in enumerate(row):
                    if math.isnan(value):
                        value = 0
                    if value == 0:
                        rgba = [0, 0, 0, 0]
                    elif value < 0:
                        alpha = int(255 * value / abs(zmin[row_idx]))
                        rgba = [0, 0, 255, alpha]
                    else:
                        alpha = int(255 * value / zmax[row_idx])
                        rgba = [255, 0, 0, alpha]
                    zrgba[row_idx, col_idx] = rgba
            zrgba = np.require(zrgba, np.uint8, 'C')
            self.mne.zscore_rgba = zrgba
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ANNOTATIONS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    def _add_region(self, plot_onset, duration, description, region=None):
        """Create (if needed), wire up and register an annotation-region."""
        if not region:
            region = AnnotRegion(self.mne, description=description,
                                 values=(plot_onset, plot_onset + duration))
        # Add to the scene only if it intersects the current view-range.
        if (any([self.mne.t_start < v < self.mne.t_start + self.mne.duration
                 for v in [plot_onset, plot_onset + duration]]) and
                region not in self.mne.plt.items):
            self.mne.plt.addItem(region)
            self.mne.plt.addItem(region.label_item)
        region.regionChangeFinished.connect(self._region_changed)
        region.gotSelected.connect(self._region_selected)
        region.removeRequested.connect(self._remove_region)
        self.mne.viewbox.sigYRangeChanged.connect(region.update_label_pos)
        self.mne.regions.append(region)
        region.update_label_pos()
    def _remove_region(self, region, from_annot=True):
        """Remove an annotation-region (and optionally its annotation)."""
        # Remove from shown regions
        if region.label_item in self.mne.viewbox.addedItems:
            self.mne.viewbox.removeItem(region.label_item)
        if region in self.mne.plt.items:
            self.mne.plt.removeItem(region)
        # Remove from all regions
        if region in self.mne.regions:
            self.mne.regions.remove(region)
        # Reset selected region
        if region == self.mne.selected_region:
            self.mne.selected_region = None
        # Remove from annotations
        # (from_annot=False when the annotation was already deleted)
        if from_annot:
            idx = self._get_onset_idx(region.getRegion()[0])
            self.mne.inst.annotations.delete(idx)
        # Update Overview-Bar
        self.mne.overview_bar.update_annotations()
def _region_selected(self, region):
old_region = self.mne.selected_region
# Remove selected-status from old region
if old_region and old_region != region:
old_region.select(False)
self.mne.selected_region = region
self.mne.fig_annotation.update_values(region)
def _get_onset_idx(self, plot_onset):
onset = _sync_onset(self.mne.inst, plot_onset, inverse=True)
idx = np.where(self.mne.inst.annotations.onset == onset)[0][0]
return idx
    def _region_changed(self, region):
        """Propagate a moved/resized region back into the annotations."""
        rgn = region.getRegion()
        region.select(True)
        # Look up by the onset the region had before it was changed.
        idx = self._get_onset_idx(region.old_onset)
        # Update Spinboxes of Annot-Dock
        self.mne.fig_annotation.update_values(region)
        # Change annotations
        self.mne.inst.annotations.onset[idx] = _sync_onset(self.mne.inst,
                                                           rgn[0],
                                                           inverse=True)
        self.mne.inst.annotations.duration[idx] = rgn[1] - rgn[0]
        # Update overview-bar
        self.mne.overview_bar.update_annotations()
    def _draw_annotations(self):
        """Intentional no-op override of the base-class drawing hook."""
        # All regions are constantly added to the Scene and handled by Qt
        # which is faster than handling adding/removing in Python.
        pass
    def _init_annot_mode(self):
        """Set up annotation-state, the dock-widget and existing regions."""
        self.mne.annotations_visible = True
        self.mne.new_annotation_labels = self._get_annotation_labels()
        if len(self.mne.new_annotation_labels) > 0:
            self.mne.current_description = self.mne.new_annotation_labels[0]
        else:
            self.mne.current_description = None
        self._setup_annotation_colors()
        self.mne.regions = list()
        self.mne.selected_region = None
        # Initialize Annotation-Dock
        # (reuse an existing dock when re-initializing)
        existing_dock = getattr(self.mne, 'fig_annotation', None)
        if existing_dock is None:
            self.mne.fig_annotation = AnnotationDock(self)
            self.addDockWidget(Qt.TopDockWidgetArea, self.mne.fig_annotation)
            self.mne.fig_annotation.setVisible(False)
        # Add annotations as regions
        for annot in self.mne.inst.annotations:
            plot_onset = _sync_onset(self.mne.inst, annot['onset'])
            duration = annot['duration']
            description = annot['description']
            self._add_region(plot_onset, duration, description)
        # Initialize showing annotation widgets
        self._change_annot_mode()
def _change_annot_mode(self):
if not self.mne.annotation_mode:
# Reset Widgets in Annotation-Figure
self.mne.fig_annotation.reset()
# Show Annotation-Dock if activated.
self.mne.fig_annotation.setVisible(self.mne.annotation_mode)
# Make Regions movable if activated and move into foreground
for region in self.mne.regions:
region.setMovable(self.mne.annotation_mode)
if self.mne.annotation_mode:
region.setZValue(2)
else:
region.setZValue(0)
# Add/Remove selection-rectangle.
if self.mne.selected_region:
self.mne.selected_region.select(self.mne.annotation_mode)
def _toggle_annotation_fig(self):
if not self.mne.is_epochs:
self.mne.annotation_mode = not self.mne.annotation_mode
self._change_annot_mode()
def _update_regions_visible(self):
for region in self.mne.regions:
region.update_visible(
self.mne.visible_annotations[region.description])
self.mne.overview_bar.update_annotations()
def _set_annotations_visible(self, visible):
for descr in self.mne.visible_annotations:
self.mne.visible_annotations[descr] = visible
self._update_regions_visible()
# Update Plot
if visible:
self._update_annotations_xrange((self.mne.t_start,
self.mne.t_start +
self.mne.duration))
else:
for region in [r for r in self.mne.regions
if r in self.mne.plt.items]:
self.mne.plt.removeItem(region)
self.mne.plt.removeItem(region.label_item)
def _toggle_annotations(self):
self.mne.annotations_visible = not self.mne.annotations_visible
self._set_annotations_visible(self.mne.annotations_visible)
    def _apply_update_projectors(self, toggle_all=False):
        """Apply the current projector-selection and refresh the plot."""
        if toggle_all:
            on = self.mne.projs_on
            applied = self.mne.projs_active
            # All on -> switch all off; otherwise switch all on.
            value = False if all(on) else True
            new_state = np.full_like(on, value)
            # Always activate applied projections
            new_state[applied] = True
            self.mne.projs_on = new_state
        self._update_projector()
        # If data was precomputed it needs to be precomputed again.
        self._rerun_precompute()
        self._redraw()
    def _toggle_proj_fig(self):
        """Open the projectors-dialog, or close it if already open."""
        if self.mne.fig_proj is None:
            ProjDialog(self, name='fig_proj')
        else:
            self.mne.fig_proj.close()
def _toggle_all_projs(self):
if self.mne.fig_proj is None:
self._apply_update_projectors(toggle_all=True)
else:
self.mne.fig_proj.toggle_all()
    def _toggle_whitening(self):
        """Toggle noise-whitening and refresh data and plot."""
        super()._toggle_whitening()
        # If data was precomputed it needs to be precomputed again.
        self._rerun_precompute()
        self._redraw()
def _toggle_settings_fig(self):
if self.mne.fig_settings is None:
SettingsDialog(self, name='fig_settings')
else:
self.mne.fig_help.close()
self.mne.fig_help = None
def _toggle_help_fig(self):
if self.mne.fig_help is None:
HelpDialog(self, name='fig_help')
else:
self.mne.fig_help.close()
self.mne.fig_help = None
    def _set_butterfly(self, butterfly):
        """Enter/leave butterfly-mode and adjust y-range and traces."""
        self.mne.butterfly = butterfly
        self._update_picks()
        self._update_data()
        if butterfly and self.mne.fig_selection is not None:
            # One y-position per selection-group.
            self.mne.selection_ypos_dict.clear()
            selections_dict = self._make_butterfly_selections_dict()
            for idx, picks in enumerate(selections_dict.values()):
                for pick in picks:
                    self.mne.selection_ypos_dict[pick] = idx + 1
            ymax = len(selections_dict) + 1
            self.mne.ymax = ymax
            self.mne.plt.setLimits(yMax=ymax)
            self.mne.plt.setYRange(0, ymax, padding=0)
        elif butterfly:
            # One y-position per channel-type.
            ymax = len(self.mne.butterfly_type_order) + 1
            self.mne.ymax = ymax
            self.mne.plt.setLimits(yMax=ymax)
            self.mne.plt.setYRange(0, ymax, padding=0)
        else:
            # Normal mode: restore the regular channel layout.
            self.mne.ymax = len(self.mne.ch_order) + 1
            self.mne.plt.setLimits(yMax=self.mne.ymax)
            self.mne.plt.setYRange(self.mne.ch_start,
                                   self.mne.ch_start + self.mne.n_channels + 1,
                                   padding=0)
        if self.mne.fig_selection is not None:
            # Update Selection-Dialog
            self.mne.fig_selection._style_butterfly()
        # Set vertical scrollbar visible
        self.mne.ax_vscroll.setVisible(not butterfly or
                                       self.mne.fig_selection is not None)
        # update overview-bar
        self.mne.overview_bar.update_viewrange()
        # update ypos and color for butterfly-mode
        for trace in self.mne.traces:
            trace.update_color()
            trace.update_ypos()
        self._draw_traces()
    def _toggle_butterfly(self):
        """Toggle butterfly-mode (not supported for ICA-instances)."""
        if self.mne.instance_type != 'ica':
            self._set_butterfly(not self.mne.butterfly)
    def _toggle_dc(self):
        """Toggle DC-removal and redraw."""
        self.mne.remove_dc = not self.mne.remove_dc
        self._redraw()
def _toggle_epoch_histogram(self):
fig = self._create_epoch_histogram()
self._get_dlg_from_mpl(fig)
def _set_events_visible(self, visible):
for event_line in self.mne.event_lines:
event_line.setVisible(visible)
# Update Plot
if visible:
self._update_events_xrange((self.mne.t_start,
self.mne.t_start +
self.mne.duration))
else:
for event_line in [evl for evl in self.mne.event_lines
if evl in self.mne.plt.items]:
self.mne.plt.removeItem(event_line)
self.mne.overview_bar.update_events()
def _toggle_events(self):
if self.mne.event_nums is not None:
self.mne.events_visible = not self.mne.events_visible
self._set_events_visible(self.mne.events_visible)
def _toggle_time_format(self):
if self.mne.time_format == 'float':
self.mne.time_format = 'clock'
self.mne.time_axis.setLabel(text='Time')
else:
self.mne.time_format = 'float'
self.mne.time_axis.setLabel(text='Time', units='s')
self._update_yaxis_labels()
def _toggle_fullscreen(self):
if self.isFullScreen():
self.showNormal()
else:
self.showFullScreen()
    def _toggle_antialiasing(self):
        """Toggle antialiasing and redraw."""
        self.mne.antialiasing = not self.mne.antialiasing
        self._redraw()
def _toggle_overview_bar(self):
self.mne.show_overview_bar = not self.mne.show_overview_bar
self.mne.overview_bar.setVisible(self.mne.show_overview_bar)
def _toggle_zenmode(self):
self.mne.scrollbars_visible = not self.mne.scrollbars_visible
for bar in [self.mne.ax_hscroll, self.mne.ax_vscroll]:
bar.setVisible(self.mne.scrollbars_visible)
self.mne.toolbar.setVisible(self.mne.scrollbars_visible)
    def _new_child_figure(self, fig_name, window_title, **kwargs):
        """Create a matplotlib Figure tagged with *fig_name* and title."""
        from matplotlib.figure import Figure
        fig = Figure(**kwargs)
        # Pass window title and fig_name on
        # (picked up later by _get_widget_from_mpl/_get_dlg_from_mpl)
        if fig_name is not None:
            fig.fig_name = fig_name
        if window_title is not None:
            fig.title = window_title
        return fig
    def _get_widget_from_mpl(self, fig):
        """Wrap a matplotlib figure into a focusable Qt canvas-widget."""
        canvas = FigureCanvasQTAgg(fig)
        canvas.setFocusPolicy(Qt.StrongFocus | Qt.WheelFocus)
        canvas.setFocus()
        # Pass window title and fig_name on
        if hasattr(fig, 'fig_name'):
            canvas.fig_name = fig.fig_name
        if hasattr(fig, 'title'):
            canvas.title = fig.title
        return canvas
def _get_dlg_from_mpl(self, fig):
canvas = self._get_widget_from_mpl(fig)
# Pass window title and fig_name on
if hasattr(canvas, 'fig_name'):
name = canvas.fig_name
else:
name = None
if hasattr(canvas, 'title'):
title = canvas.title
else:
title = None
dlg = _BaseDialog(self, widget=canvas, title=title, name=name)
dlg.show()
    def _create_ch_context_fig(self, idx):
        """Create the channel context-figure and show it in a dialog."""
        fig = super()._create_ch_context_fig(idx)
        # The base-class may return None; only then skip the dialog.
        if fig is not None:
            self._get_dlg_from_mpl(fig)
    def _toggle_epoch_histogramm(self):
        """Show the epoch histogram in a dialog (epochs only).

        NOTE(review): near-duplicate of ``_toggle_epoch_histogram`` (note
        the spelling); presumably kept for backwards-compatibility —
        confirm before removing either.
        """
        if self.mne.is_epochs:
            fig = self._create_epoch_histogram()
            if fig is not None:
                self._get_dlg_from_mpl(fig)
def _create_selection_fig(self):
if not any([isinstance(fig, SelectionDialog) for
fig in self.mne.child_figs]):
SelectionDialog(self)
    def message_box(self, text, info_text=None, buttons=None,
                    default_button=None, icon=None, modal=True):
        """Show the shared message-box, modal by default.

        Parameters
        ----------
        text : str
            Bold headline text.
        info_text : str | None
            Additional informative text.
        buttons : QMessageBox.StandardButtons | None
            Buttons to show.
        default_button : QMessageBox.StandardButton | None
            Button preselected as default.
        icon : QMessageBox.Icon | None
            Icon to display.
        modal : bool
            If True (default) block with ``exec`` and return its result.
        """
        self.msg_box.setText(f'<font size="+2"><b>{text}</b></font>')
        if info_text is not None:
            self.msg_box.setInformativeText(info_text)
        if buttons is not None:
            self.msg_box.setStandardButtons(buttons)
        if default_button is not None:
            self.msg_box.setDefaultButton(default_button)
        if icon is not None:
            self.msg_box.setIcon(icon)
        # Allow interacting with message_box in test-mode.
        # Set modal=False only if no return value is expected.
        self.msg_box.setModal(False if self.test_mode else modal)
        if self.test_mode or not modal:
            self.msg_box.show()
        else:
            return self.msg_box.exec()
    def keyPressEvent(self, event):
        """Customize key press events."""
        # On MacOs additionally KeypadModifier is set when arrow-keys
        # are pressed.
        # On Unix GroupSwitchModifier is set when ctrl is pressed.
        # To preserve cross-platform consistency the following comparison
        # of the modifier-values is done.
        # modifiers need to be exclusive
        # ('4' in hex detects the Ctrl-bit; 33554432 == 0x02000000 is the
        # Shift-modifier value — see Qt.KeyboardModifier)
        modifiers = {
            'Ctrl': '4' in hex(int(event.modifiers())),
            'Shift': int(event.modifiers()) == 33554432
        }
        for key_name in self.mne.keyboard_shortcuts:
            key_dict = self.mne.keyboard_shortcuts[key_name]
            if key_dict['qt_key'] == event.key() and 'slot' in key_dict:
                mod_idx = 0
                # Get modifier
                if 'modifier' in key_dict:
                    mods = [modifiers[mod] for mod in modifiers]
                    if any(mods):
                        # No multiple modifiers supported yet
                        mod = [mod for mod in modifiers if modifiers[mod]][0]
                        if mod in key_dict['modifier']:
                            mod_idx = key_dict['modifier'].index(mod)
                # Fall back to the first slot/parameter when the modifier
                # has no dedicated entry.
                slot_idx = mod_idx if mod_idx < len(key_dict['slot']) else 0
                slot = key_dict['slot'][slot_idx]
                if 'parameter' in key_dict:
                    param_idx = (mod_idx if mod_idx <
                                 len(key_dict['parameter']) else 0)
                    slot(key_dict['parameter'][param_idx])
                else:
                    slot()
                break
def _draw_traces(self):
# Update data in traces (=drawing traces)
for trace in self.mne.traces:
# Update data
trace.update_data()
def _get_size(self):
inch_width = self.width() / self.logicalDpiX()
inch_height = self.height() / self.logicalDpiY()
return inch_width, inch_height
def _fake_keypress(self, key, fig=None):
fig = fig or self
if key.isupper():
key = key.lower()
modifier = Qt.ShiftModifier
elif key.startswith('shift+'):
key = key[6:]
modifier = Qt.ShiftModifier
else:
modifier = Qt.NoModifier
# Use pytest-qt's exception-hook
with capture_exceptions() as exceptions:
QTest.keyPress(fig, self.mne.keyboard_shortcuts[key]['qt_key'],
modifier)
for exc in exceptions:
raise RuntimeError(f'There as been an {exc[0]} inside the Qt '
f'event loop (look above for traceback).')
def _fake_click(self, point, add_points=None, fig=None, ax=None,
xform='ax', button=1, kind='press'):
add_points = add_points or list()
# Wait until Window is fully shown.
QTest.qWaitForWindowExposed(self)
# Scene-Dimensions still seem to change to final state when waiting
# for a short time.
QTest.qWait(10)
# Qt: right-button=2, matplotlib: right-button=3
if button == 1:
button = Qt.LeftButton
else:
button = Qt.RightButton
# For Qt, fig or ax both would be the widget to test interaction on.
# If View
fig = ax or fig or self.mne.view
if xform == 'ax':
# For Qt, the equivalent of matplotlibs transAxes
# would be a transformation to View Coordinates.
# But for the View top-left is (0, 0) and bottom-right is
# (view-width, view-height).
view_width = fig.width()
view_height = fig.height()
x = view_width * point[0]
y = view_height * (1 - point[1])
point = Point(x, y)
for idx, apoint in enumerate(add_points):
x2 = view_width * apoint[0]
y2 = view_height * (1 - apoint[1])
add_points[idx] = Point(x2, y2)
elif xform == 'data':
# For Qt, the equivalent of matplotlibs transData
# would be a transformation to
# the coordinate system of the ViewBox.
# This only works on the View (self.mne.view)
fig = self.mne.view
point = self.mne.viewbox.mapViewToScene(Point(*point))
for idx, apoint in enumerate(add_points):
add_points[idx] = self.mne.viewbox.mapViewToScene(
Point(*apoint))
elif xform == 'none' or xform is None:
if isinstance(point, (tuple, list)):
point = Point(*point)
else:
point = Point(point)
for idx, apoint in enumerate(add_points):
if isinstance(apoint, (tuple, list)):
add_points[idx] = Point(*apoint)
else:
add_points[idx] = Point(apoint)
# Use pytest-qt's exception-hook
with capture_exceptions() as exceptions:
widget = fig.viewport() if isinstance(fig, QGraphicsView) else fig
if kind == 'press':
# always click because most interactivity comes form
# mouseClickEvent from pyqtgraph (just press doesn't suffice
# here).
_mouseClick(widget=widget, pos=point, button=button)
elif kind == 'release':
_mouseRelease(widget=widget, pos=point, button=button)
elif kind == 'motion':
_mouseMove(widget=widget, pos=point, buttons=button)
elif kind == 'drag':
_mouseDrag(widget=widget, positions=[point] + add_points,
button=button)
for exc in exceptions:
raise RuntimeError(f'There as been an {exc[0]} inside the Qt '
f'event loop (look above for traceback).')
# Waiting some time for events to be processed.
QTest.qWait(50)
    def _fake_scroll(self, x, y, step, fig=None):
        """Simulate scrolling; *x*, *y* and *fig* are unused here."""
        # QTest doesn't support simulating scrolling-wheel
        self.vscroll(step)
    def _click_ch_name(self, ch_index, button):
        """Simulate a click on the channel-name at *ch_index*."""
        self.mne.channel_axis.repaint()
        # Wait because channel-axis may need time
        # (came up with test_epochs::test_plot_epochs_clicks)
        QTest.qWait(100)
        if not self.mne.butterfly:
            ch_name = self.mne.ch_names[self.mne.picks[ch_index]]
            # Click into the middle of the label's bounding-box.
            xrange, yrange = self.mne.channel_axis.ch_texts[ch_name]
            x = np.mean(xrange)
            y = np.mean(yrange)
            self._fake_click((x, y), fig=self.mne.view, button=button,
                             xform='none')
    def _update_trace_offsets(self):
        """Legacy no-op kept for API-compatibility with mne<1.0."""
        pass
    def _resize_by_factor(self, factor):
        """Intentional no-op override of the base-class resize hook."""
        pass
def _get_ticklabels(self, orientation):
if orientation == 'x':
ax = self.mne.time_axis
else:
ax = self.mne.channel_axis
return list(ax.get_labels())
def _get_scale_bar_texts(self):
return tuple(t.toPlainText() for t in self.mne.scalebar_texts.values())
def show(self):
# Set raise_window like matplotlib if possible
super().show()
try:
from matplotlib import rcParams
raise_window = rcParams['figure.raise_window']
except ImportError:
raise_window = True
if raise_window:
self.activateWindow()
self.raise_()
    def _close_event(self, fig=None):
        """Force calling of the MPL figure close event.

        Parameters
        ----------
        fig : object | None
            The figure to close; defaults to ``self``.
        """
        fig = fig or self
        if hasattr(fig, 'canvas'):
            try:
                fig.canvas.close_event()
            except ValueError:  # old mpl with Qt
                pass  # pragma: no cover
        else:
            # Not a matplotlib figure — fall back to its own close().
            fig.close()
    def closeEvent(self, event):
        """Customize close event.

        Tears the browser down in a fixed order: disconnect signals,
        persist settings, drop references that would otherwise form
        reference cycles, close child figures, then emit ``gotClosed``
        and schedule Qt-side deletion.
        """
        event.accept()
        if hasattr(self, 'mne'):
            # Explicit disconnects to avoid reference cycles that gc can't
            # properly resolve (signal connections keep objects alive)
            if hasattr(self.mne, 'plt'):
                _disconnect(self.mne.plt.sigXRangeChanged)
                _disconnect(self.mne.plt.sigYRangeChanged)
            if hasattr(self.mne, 'toolbar'):
                for action in self.mne.toolbar.actions():
                    _disconnect(action.triggered)
            # Save settings going into QSettings.
            for qsetting in qsettings_params:
                value = getattr(self.mne, qsetting)
                QSettings().setValue(qsetting, value)
            # Drop heavyweight attributes to break remaining cycles.
            for attr in ('keyboard_shortcuts', 'traces', 'plt', 'toolbar'):
                if hasattr(self.mne, attr):
                    delattr(self.mne, attr)
            if hasattr(self.mne, 'child_figs'):
                for fig in self.mne.child_figs:
                    fig.close()
                self.mne.child_figs.clear()
            for attr in ('traces', 'event_lines', 'regions'):
                getattr(self.mne, attr, []).clear()
            # vline is a list of InfiniteLines in epochs-mode,
            # a single InfiniteLine otherwise.
            if getattr(self.mne, 'vline', None) is not None:
                if self.mne.is_epochs:
                    for vl in self.mne.vline:
                        _disconnect(vl.sigPositionChangeFinished)
                    self.mne.vline.clear()
                else:
                    _disconnect(self.mne.vline.sigPositionChangeFinished)
        if getattr(self, 'load_thread', None) is not None:
            self.load_thread.clean()
            self.load_thread = None
        # Remove self from browser_instances in globals
        if self in _browser_instances:
            _browser_instances.remove(self)
        self._close(event)
        self.gotClosed.emit()
        # Make sure PyQtBrowser gets deleted after it was closed.
        self.deleteLater()
def _get_n_figs():
    """Return the number of currently visible top-level Qt windows."""
    # Give the Qt event loop a moment to clean up recently closed windows.
    QTest.qWait(100)
    visible = [w for w in QApplication.topLevelWindows() if w.isVisible()]
    return len(visible)
def _close_all():
    """Close every open top-level Qt window, if any exist."""
    if QApplication.topLevelWindows():
        QApplication.closeAllWindows()
# mouse testing functions adapted from pyqtgraph
# (pyqtgraph.tests.ui_testing.py)
def _mousePress(widget, pos, button, modifier=None):
    """Send a synthetic mouse-button-press event to *widget* at *pos*."""
    if modifier is None:
        modifier = Qt.KeyboardModifier.NoModifier
    press_event = QMouseEvent(QEvent.Type.MouseButtonPress, pos, button,
                              Qt.MouseButton.NoButton, modifier)
    QApplication.sendEvent(widget, press_event)
def _mouseRelease(widget, pos, button, modifier=None):
    """Send a synthetic mouse-button-release event to *widget* at *pos*."""
    if modifier is None:
        modifier = Qt.KeyboardModifier.NoModifier
    release_event = QMouseEvent(QEvent.Type.MouseButtonRelease, pos, button,
                                Qt.MouseButton.NoButton, modifier)
    QApplication.sendEvent(widget, release_event)
def _mouseMove(widget, pos, buttons=None, modifier=None):
    """Send a synthetic mouse-move event to *widget* towards *pos*."""
    if buttons is None:
        buttons = Qt.MouseButton.NoButton
    if modifier is None:
        modifier = Qt.KeyboardModifier.NoModifier
    move_event = QMouseEvent(QEvent.Type.MouseMove, pos,
                             Qt.MouseButton.NoButton, buttons, modifier)
    QApplication.sendEvent(widget, move_event)
def _mouseClick(widget, pos, button, modifier=None):
    """Simulate a full mouse click (move, press, release) at *pos*."""
    _mouseMove(widget, pos)
    _mousePress(widget, pos, button, modifier)
    _mouseRelease(widget, pos, button, modifier)
def _mouseDrag(widget, positions, button, modifier=None):
    """Simulate a mouse drag along *positions* (a list of points).

    Presses at the first position, moves through the intermediate ones,
    and releases at the last.
    """
    _mouseMove(widget, positions[0])
    _mousePress(widget, positions[0], button, modifier)
    # Delay for 10 ms for drag to be recognized.
    QTest.qWait(10)
    for pos in positions[1:]:
        _mouseMove(widget, pos, button, modifier)
    _mouseRelease(widget, positions[-1], button, modifier)
def _init_browser(**kwargs):
    """Create a PyQtGraphBrowser after initializing the QApplication.

    All keyword arguments are forwarded to the browser constructor.
    """
    setConfigOption('enableExperimental', True)
    # Must use pyqtgraph's mkQApp, otherwise the browser won't show.
    _init_mne_qtapp(pg_app=True)
    browser = PyQtGraphBrowser(**kwargs)
    return browser
| 39.043459 | 79 | 0.563492 |
import datetime
import functools
import gc
import math
import platform
import sys
from ast import literal_eval
from collections import OrderedDict
from contextlib import contextmanager
from copy import copy
from functools import partial
from os.path import getsize
import numpy as np
from PyQt5.QtCore import (QEvent, QThread, Qt, pyqtSignal, QRectF, QLineF,
QPoint, QSettings)
from PyQt5.QtGui import (QFont, QIcon, QPixmap, QTransform,
QMouseEvent, QImage, QPainter, QPainterPath)
from PyQt5.QtTest import QTest
from PyQt5.QtWidgets import (QAction, QColorDialog, QComboBox, QDialog,
QDockWidget, QDoubleSpinBox, QFormLayout,
QGridLayout, QHBoxLayout, QInputDialog,
QLabel, QMainWindow, QMessageBox,
QPushButton, QScrollBar, QToolTip, QWidget,
QStyleOptionSlider, QStyle,
QApplication, QGraphicsView, QProgressBar,
QVBoxLayout, QLineEdit, QCheckBox, QScrollArea,
QGraphicsLineItem, QGraphicsScene, QTextEdit,
QSizePolicy, QSpinBox, QDesktopWidget, QSlider)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.colors import to_rgba_array
from pyqtgraph import (AxisItem, GraphicsView, InfLineLabel, InfiniteLine,
LinearRegionItem, PlotCurveItem, PlotItem,
Point, TextItem, ViewBox, mkBrush,
mkPen, setConfigOption, mkColor)
from scipy.stats import zscore
from mne.viz import plot_sensors
from mne.viz._figure import BrowserBase
from mne.viz.utils import _simplify_float, _merge_annotations, _figure_agg
from mne.annotations import _sync_onset
from mne.io.pick import (_DATA_CH_TYPES_ORDER_DEFAULT,
channel_indices_by_type, _DATA_CH_TYPES_SPLIT)
from mne.utils import _to_rgb, logger, sizeof_fmt, warn, get_config
from . import _browser_instances
from .icons import resources
# Prefer pytest-qt's context-manager so exceptions raised inside the Qt
# event loop are captured and can be re-raised in tests.
try:
    from pytestqt.exceptions import capture_exceptions
except ImportError:
    logger.debug('If pytest-qt is not installed, the errors from inside '
                 'the Qt-loop will be occluded and it will be harder '
                 'to trace back the cause.')
    @contextmanager
    def capture_exceptions():
        # Fallback no-op: yields a list that always stays empty.
        yield []
# Backend name reported for this browser implementation.
name = 'pyqtgraph'
# Use the upstream helper when available; otherwise fall back to a local copy.
try:
    from mne.viz.backends._utils import _init_mne_qtapp
except ImportError:
    from mne.viz.backends._utils import _init_qt_resources

    def _init_mne_qtapp(enable_icon=True, pg_app=False):
        """Get QApplication-instance for MNE-Python.

        Parameters
        ----------
        enable_icon: bool
            If to set an MNE-icon for the app.
        pg_app: bool
            If to create the QApplication with pyqtgraph. For an until know
            undiscovered reason the pyqtgraph-browser won't show without
            mkQApp from pyqtgraph.

        Returns
        -------
        app: ``PyQt5.QtWidgets.QApplication``
            Instance of QApplication.
        """
        from PyQt5.QtWidgets import QApplication
        from PyQt5.QtGui import QIcon
        app_name = 'MNE-Python'
        organization_name = 'MNE'
        # Fix from cbrnr/mnelab for app name in menu bar
        if sys.platform.startswith("darwin"):
            try:
                # set bundle name on macOS (app name shown in the menu bar)
                from Foundation import NSBundle
                bundle = NSBundle.mainBundle()
                info = (bundle.localizedInfoDictionary()
                        or bundle.infoDictionary())
                info["CFBundleName"] = app_name
            except ModuleNotFoundError:
                pass
        if pg_app:
            from pyqtgraph import mkQApp
            app = mkQApp(app_name)
        else:
            app = (QApplication.instance()
                   or QApplication(sys.argv or [app_name]))
        app.setApplicationName(app_name)
        app.setOrganizationName(organization_name)
        if enable_icon:
            # Set icon
            _init_qt_resources()
            # Compare macOS versions numerically: the former string
            # comparison (mac_ver()[0] >= '10.16') was lexicographic and
            # e.g. evaluated '10.9' >= '10.16' as True.
            mac_version = platform.mac_ver()[0]
            try:
                is_bigsur = tuple(
                    int(part) for part in mac_version.split('.')[:2]
                ) >= (10, 16)
            except ValueError:
                # Empty/unparsable version (e.g. non-macOS platforms).
                is_bigsur = False
            kind = 'bigsur-' if is_bigsur else ''
            app.setWindowIcon(QIcon(f":/mne-{kind}icon.png"))
        return app
def _get_color(color_spec):
    """Convert *color_spec* into a pyqtgraph QColor.

    Accepts matplotlib color specifiers (names, hex strings, float tuples
    in 0-1) as well as anything pyqtgraph's ``mkColor`` understands.
    Raises ValueError for specs neither library can parse.
    """
    # Let matplotlib resolve color-names etc. first, if it can.
    try:
        color_spec = _to_rgb(color_spec, alpha=True)
    except ValueError:
        pass
    # matplotlib uses float channels in 0-1; pyqtgraph wants ints in 0-255.
    if isinstance(color_spec, tuple) and all(c <= 1 for c in color_spec):
        color_spec = tuple(int(c * 255) for c in color_spec)
    try:
        return mkColor(color_spec)
    except ValueError:
        raise ValueError(f'"{color_spec}" is not a valid matplotlib '
                         f'color-specifier!') from None
def propagate_to_children(method):
    """Decorate a trace-method so the call is repeated on all child traces.

    In epochs-mode a parent trace owns child traces (one per extra color).
    The decorated method runs on the parent first; unless the caller passes
    ``propagate=False``, it is then invoked with the same arguments on each
    entry of the parent's ``child_traces``.
    """
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        do_propagate = kwargs.pop('propagate', True)
        instance = args[0]
        # Parent always goes first.
        result = method(*args, **kwargs)
        if instance.mne.is_epochs and do_propagate:
            # Only parent traces carry a 'child_traces' list.
            for child in getattr(instance, 'child_traces', []):
                getattr(child, method.__name__)(*args[1:], **kwargs)
        return result
    return wrapper
class DataTrace(PlotCurveItem):
    """A single data curve in the browser plot.

    Represents one channel of the shown instance. In epochs-mode a parent
    trace may own several invisible "child" traces, one per additional
    color, because ``PlotCurveItem`` only supports a single pen per item.
    Methods decorated with ``propagate_to_children`` run on the parent
    first and are then repeated on every child (pass ``propagate=False``
    to suppress that).
    """
    def __init__(self, main, ch_idx, child_idx=None, parent_trace=None):
        super().__init__()
        self.main = main
        self.mne = main.mne
        # Set clickable with small area around trace to make clicking easier.
        self.setClickable(True, 12)
        # Set default z-value to 1 to be before other items in scene
        self.setZValue(1)
        # General attributes
        # The ch_idx is the index of the channel represented by this trace
        # in the channel-order from the unchanged instance (which also picks
        # refer to).
        self.ch_idx = None
        # The range_idx is the index of the channel represented by this trace
        # in the shown range.
        self.range_idx = None
        # The order_idx is the index of the channel represented by this trace
        # in the channel-order (defined e.g. by group_by).
        self.order_idx = None
        # Name of the channel the trace represents.
        self.ch_name = None
        # Indicates if trace is bad.
        self.isbad = None
        # Channel-type of trace.
        self.ch_type = None
        # Color-specifier (all possible matplotlib color formats)
        self.color = None
        # Attributes for epochs-mode
        # Index of child if child.
        self.child_idx = child_idx
        # Reference to parent if child.
        self.parent_trace = parent_trace
        # Only for parent traces
        if self.parent_trace is None:
            # Add to main trace list
            self.mne.traces.append(self)
            # References to children
            self.child_traces = list()
            # Colors of trace in viewrange
            self.trace_colors = None
        # set attributes
        self.set_ch_idx(ch_idx)
        self.update_color()
        self.update_scale()
        # Avoid calling self.update_data() twice on initialization
        # (because of update_scale()).
        if self.mne.clipping is None:
            self.update_data()
        # Add to main plot
        self.mne.plt.addItem(self)
    @propagate_to_children
    def remove(self):
        """Remove this trace (and its children) from plot and trace list."""
        self.mne.plt.removeItem(self)
        # Only for parent trace
        if self.parent_trace is None:
            self.mne.traces.remove(self)
        self.deleteLater()
    @propagate_to_children
    def update_color(self):
        """Set the pen color; manage child traces for multi-color epochs."""
        # Epochs
        if self.mne.is_epochs:
            # Add child traces if shown trace needs to have multiple colors
            # (PlotCurveItem only supports one color per object).
            # There always as many color-specific traces added depending
            # on the whole time range of the instance regardless of the
            # currently visible time range (to avoid checking for new colors
            # while scrolling horizontally).
            # Only for parent trace
            if hasattr(self, 'child_traces'):
                self.trace_colors = np.unique(
                    self.mne.epoch_color_ref[self.ch_idx], axis=0)
                n_childs = len(self.child_traces)
                trace_diff = len(self.trace_colors) - n_childs - 1
                # Add child traces if necessary
                if trace_diff > 0:
                    for cix in range(n_childs, n_childs + trace_diff):
                        child = DataTrace(self.main, self.ch_idx,
                                          child_idx=cix, parent_trace=self)
                        self.child_traces.append(child)
                elif trace_diff < 0:
                    for _ in range(abs(trace_diff)):
                        rm_trace = self.child_traces.pop()
                        rm_trace.remove()
                # Set parent color
                self.color = self.trace_colors[0]
            # Only for child trace
            else:
                self.color = self.parent_trace.trace_colors[
                    self.child_idx + 1]
        # Raw/ICA
        else:
            if self.isbad:
                self.color = self.mne.ch_color_bad
            else:
                self.color = self.mne.ch_color_ref[self.ch_name]
        self.setPen(_get_color(self.color))
    @propagate_to_children
    def update_range_idx(self):
        """Recompute this trace's index inside the currently shown picks."""
        self.range_idx = np.argwhere(self.mne.picks == self.ch_idx)[0][0]
    @propagate_to_children
    def update_ypos(self):
        """Recompute the y-position depending on view mode (butterfly etc.)."""
        if self.mne.butterfly and self.mne.fig_selection is not None:
            self.ypos = self.mne.selection_ypos_dict[self.ch_idx]
        elif self.mne.fig_selection is not None and \
                self.mne.old_selection == 'Custom':
            self.ypos = self.range_idx + 1
        elif self.mne.butterfly:
            self.ypos = self.mne.butterfly_type_order.index(self.ch_type) + 1
        else:
            self.ypos = self.range_idx + self.mne.ch_start + 1
    @propagate_to_children
    def update_scale(self):
        """Apply the current y-scale factor; re-clip data if clipping is on."""
        transform = QTransform()
        transform.scale(1., self.mne.scale_factor)
        self.setTransform(transform)
        if self.mne.clipping is not None:
            self.update_data(propagate=False)
    @propagate_to_children
    def set_ch_idx(self, ch_idx):
        """Bind the trace to channel *ch_idx* and derive all index attributes."""
        # The ch_idx is the index of the channel represented by this trace
        # in the channel-order from the unchanged instance (which also picks
        # refer to).
        self.ch_idx = ch_idx
        # The range_idx is the index of the channel represented by this trace
        # in the shown range.
        self.update_range_idx(propagate=False)
        # The order_idx is the index of the channel represented by this trace
        # in the channel-order (defined e.g. by group_by).
        self.order_idx = np.argwhere(self.mne.ch_order == self.ch_idx)[0][0]
        self.ch_name = self.mne.inst.ch_names[ch_idx]
        self.isbad = self.ch_name in self.mne.info['bads']
        self.ch_type = self.mne.ch_types[ch_idx]
        self.update_ypos(propagate=False)
    @propagate_to_children
    def update_data(self):
        """Push the current (possibly decimated) data into the curve item."""
        if self.mne.is_epochs or (self.mne.clipping is not None and
                                  self.mne.clipping != 'clamp'):
            connect = 'finite'
            skip = False
        else:
            connect = 'all'
            skip = True
        if self.mne.data_precomputed:
            data = self.mne.data[self.order_idx]
        else:
            data = self.mne.data[self.range_idx]
        # Get decim-specific time if enabled
        if self.mne.decim != 1:
            times = self.mne.decim_times[self.mne.decim_data[self.range_idx]]
            data = data[..., ::self.mne.decim_data[self.range_idx]]
        else:
            times = self.mne.times
        # For multiple color traces with epochs
        # replace values from other colors with NaN.
        if self.mne.is_epochs:
            data = np.copy(data)
            check_color = self.mne.epoch_color_ref[self.ch_idx,
                                                   self.mne.epoch_idx]
            bool_ixs = np.invert(np.equal(self.color, check_color).all(axis=1))
            starts = self.mne.boundary_times[self.mne.epoch_idx][bool_ixs]
            stops = self.mne.boundary_times[self.mne.epoch_idx + 1][bool_ixs]
            for start, stop in zip(starts, stops):
                data[np.logical_and(start <= times, times <= stop)] = np.nan
        self.setData(times, data, connect=connect, skipFiniteCheck=skip,
                     antialias=self.mne.antialiasing)
        self.setPos(0, self.ypos)
    def toggle_bad(self, x=None):
        """Toggle bad-status: of the epoch at time *x*, or of the channel.

        If *x* is given in epochs-mode, the epoch under *x* is toggled;
        otherwise the whole channel is toggled.
        """
        # Toggle bad epoch
        if self.mne.is_epochs and x is not None:
            epoch_idx, color = self.main._toggle_bad_epoch(x)
            # Update epoch color
            if color != 'none':
                new_epo_color = np.repeat(to_rgba_array(color),
                                          len(self.mne.inst.ch_names), axis=0)
            elif self.mne.epoch_colors is None:
                new_epo_color = np.concatenate(
                    [to_rgba_array(c) for c
                     in self.mne.ch_color_ref.values()])
            else:
                new_epo_color = \
                    np.concatenate([to_rgba_array(c) for c in
                                    self.mne.epoch_colors[epoch_idx]])
            # Update bad channel colors
            bad_idxs = np.in1d(self.mne.ch_names, self.mne.info['bads'])
            new_epo_color[bad_idxs] = to_rgba_array(self.mne.ch_color_bad)
            self.mne.epoch_color_ref[:, epoch_idx] = new_epo_color
            # Update overview-bar
            self.mne.overview_bar.update_bad_epochs()
            # Update other traces inlcuding self
            for trace in self.mne.traces:
                trace.update_color()
                # Update data is necessary because colored segments will vary
                trace.update_data()
        # Toggle bad channel
        else:
            bad_color, pick, marked_bad = self.main._toggle_bad_channel(
                self.range_idx)
            # Update line color status
            self.isbad = not self.isbad
            # Update colors for epochs
            if self.mne.is_epochs:
                if marked_bad:
                    new_ch_color = np.repeat(to_rgba_array(bad_color),
                                             len(self.mne.inst), axis=0)
                elif self.mne.epoch_colors is None:
                    ch_color = self.mne.ch_color_ref[self.ch_name]
                    new_ch_color = np.repeat(to_rgba_array(ch_color),
                                             len(self.mne.inst), axis=0)
                else:
                    new_ch_color = np.concatenate([to_rgba_array(c[pick]) for
                                                   c in self.mne.epoch_colors])
                self.mne.epoch_color_ref[pick, :] = new_ch_color
            # Update trace color
            self.update_color()
            if self.mne.is_epochs:
                self.update_data()
            # Update channel-axis
            self.main._update_yaxis_labels()
            # Update overview-bar
            self.mne.overview_bar.update_bad_channels()
            # Update sensor color (if in selection mode)
            if self.mne.fig_selection is not None:
                self.mne.fig_selection._update_bad_sensors(pick, marked_bad)
    def mouseClickEvent(self, ev):
        """Handle left-clicks on the trace by toggling bad-status."""
        if (not self.clickable or ev.button() != Qt.MouseButton.LeftButton
                or self.mne.annotation_mode):
            # Explicitly ignore events in annotation-mode
            ev.ignore()
            return
        if self.mouseShape().contains(ev.pos()):
            ev.accept()
            self.toggle_bad(ev.pos().x())
    def get_xdata(self):
        """Return the x-data (times) of the curve."""
        return self.xData
    def get_ydata(self):
        """Return the y-data of the curve, shifted by the trace's y-offset."""
        return self.yData + self.ypos
class TimeAxis(AxisItem):
    """The x-axis of the browser, showing time (or epoch numbers)."""
    def __init__(self, mne):
        self.mne = mne
        # Cache of the last tickSpacing result (used by tickStrings for
        # the 'clock' time-format).
        self._spacing = None
        super().__init__(orientation='bottom')
    def tickValues(self, minVal, maxVal, size):
        """Return tick positions; epoch midpoints in epochs-mode."""
        if self.mne.is_epochs:
            value_idxs = np.searchsorted(self.mne.midpoints, [minVal, maxVal])
            values = self.mne.midpoints[slice(*value_idxs)]
            spacing = len(self.mne.inst.times) / self.mne.info['sfreq']
            tick_values = [(spacing, values)]
            return tick_values
        else:
            # Save _spacing for later use
            self._spacing = self.tickSpacing(minVal, maxVal, size)
            return super().tickValues(minVal, maxVal, size)
    def tickStrings(self, values, scale, spacing):
        """Return tick labels: epoch numbers, clock times, or seconds."""
        if self.mne.is_epochs:
            epoch_nums = self.mne.inst.selection
            ts = epoch_nums[np.searchsorted(self.mne.midpoints, values)]
            tick_strings = [str(v) for v in ts]
        elif self.mne.time_format == 'clock':
            meas_date = self.mne.info['meas_date']
            first_time = datetime.timedelta(seconds=self.mne.inst.first_time)
            # Number of decimal digits needed for the current tick spacing.
            digits = np.ceil(-np.log10(min(v[0] for v in self._spacing)
                                       ) + 1).astype(int)
            tick_strings = list()
            for val in values:
                val_time = datetime.timedelta(seconds=val) + \
                    first_time + meas_date
                val_str = val_time.strftime('%H:%M:%S')
                if int(val_time.microsecond):
                    val_str += \
                        f'{round(val_time.microsecond * 1e-6, digits)}'[1:]
                tick_strings.append(val_str)
        else:
            tick_strings = super().tickStrings(values, scale, spacing)
        return tick_strings
    def repaint(self):
        """Force a full redraw by invalidating the cached picture."""
        self.picture = None
        self.update()
    def get_labels(self):
        """Return all currently shown tick labels (used by tests)."""
        values = self.tickValues(*self.mne.viewbox.viewRange()[0],
                                 self.mne.xmax)
        labels = list()
        for spacing, vals in values:
            labels += self.tickStrings(vals, 1, spacing)
        return labels
class ChannelAxis(AxisItem):
    """The y-axis of the browser, showing channel names (or types/selections).

    Also records the pixel extents of every drawn label in ``ch_texts`` so
    clicks on the axis can be mapped back to channels.
    """
    def __init__(self, main):
        self.main = main
        self.mne = main.mne
        # Maps channel-name -> ((x0, x1), (y0, y1)) pixel extents of its label.
        self.ch_texts = OrderedDict()
        super().__init__(orientation='left')
        self.style['autoReduceTextSpace'] = False
    def tickValues(self, minVal, maxVal, size):
        """Return one tick per integer channel position inside the range."""
        minVal, maxVal = sorted((minVal, maxVal))
        values = list(range(round(minVal) + 1, round(maxVal)))
        tick_values = [(1, values)]
        return tick_values
    def tickStrings(self, values, scale, spacing):
        """Return labels: selection names, channel types, or channel names."""
        # Get channel-names
        if self.mne.butterfly and self.mne.fig_selection is not None:
            tick_strings = list(self.main._make_butterfly_selections_dict())
        elif self.mne.butterfly:
            _, ixs, _ = np.intersect1d(_DATA_CH_TYPES_ORDER_DEFAULT,
                                       self.mne.ch_types, return_indices=True)
            ixs.sort()
            tick_strings = np.array(_DATA_CH_TYPES_ORDER_DEFAULT)[ixs]
        else:
            # Get channel-names and by substracting 1 from tick-values
            # since the first channel starts at y=1.
            tick_strings = self.mne.ch_names[
                self.mne.ch_order[[v - 1 for v in values]]]
        return tick_strings
    def drawPicture(self, p, axisSpec, tickSpecs, textSpecs):
        """Draw labels with per-channel colors and record their extents."""
        super().drawPicture(p, axisSpec, tickSpecs, textSpecs)
        for rect, flags, text in textSpecs:
            if self.mne.butterfly and self.mne.fig_selection is not None:
                p.setPen(_get_color('black'))
            elif self.mne.butterfly:
                p.setPen(_get_color(self.mne.ch_color_dict[text]))
            elif text in self.mne.info['bads']:
                p.setPen(_get_color(self.mne.ch_color_bad))
            else:
                p.setPen(_get_color(self.mne.ch_color_ref[text]))
            self.ch_texts[text] = ((rect.left(), rect.left() + rect.width()),
                                   (rect.top(), rect.top() + rect.height()))
            p.drawText(rect, int(flags), text)
    def repaint(self):
        """Force a full redraw by invalidating the cached picture."""
        self.picture = None
        self.update()
    def mouseClickEvent(self, event):
        """Toggle bad-status (left-click) or open context-figure (right-click)
        for the channel whose label is closest to the click position."""
        # Clean up channel-texts
        if not self.mne.butterfly:
            self.ch_texts = {k: v for k, v in self.ch_texts.items()
                             if k in [tr.ch_name for tr in self.mne.traces]}
        # Get channel-name from position of channel-description
        ypos = event.scenePos().y()
        y_values = np.asarray(list(self.ch_texts.values()))[:, 1, :]
        y_diff = np.abs(y_values - ypos)
        ch_idx = int(np.argmin(y_diff, axis=0)[0])
        ch_name = list(self.ch_texts.keys())[ch_idx]
        trace = [tr for tr in self.mne.traces
                 if tr.ch_name == ch_name][0]
        if event.button() == Qt.LeftButton:
            trace.toggle_bad()
        elif event.button() == Qt.RightButton:
            self.main._create_ch_context_fig(trace.range_idx)
    def get_labels(self):
        """Return all currently shown tick labels (used by tests)."""
        values = self.tickValues(*self.mne.viewbox.viewRange()[1], None)
        labels = self.tickStrings(values[0][1], None, None)
        return labels
class BaseScrollBar(QScrollBar):
    """QScrollBar that jumps directly to the clicked position.

    Default QScrollBar behavior only pages towards a click in the groove;
    this subclass moves the slider right to where the user clicked.
    """
    def __init__(self, parent=None):
        super().__init__(parent)
    def mousePressEvent(self, event):
        """Customize mouse click events.

        Left-clicks in the groove (outside the slider) move the slider
        directly to the click position instead of paging towards it.
        """
        if event.button() == Qt.LeftButton:
            opt = QStyleOptionSlider()
            self.initStyleOption(opt)
            control = self.style().hitTestComplexControl(
                QStyle.CC_ScrollBar, opt,
                event.pos(), self)
            if (control == QStyle.SC_ScrollBarAddPage or
                    control == QStyle.SC_ScrollBarSubPage):
                # scroll here
                gr = self.style().subControlRect(QStyle.CC_ScrollBar,
                                                opt,
                                                QStyle.SC_ScrollBarGroove,
                                                self)
                sr = self.style().subControlRect(QStyle.CC_ScrollBar,
                                                opt,
                                                QStyle.SC_ScrollBarSlider,
                                                self)
                if self.orientation() == Qt.Horizontal:
                    pos = event.pos().x()
                    sliderLength = sr.width()
                    sliderMin = gr.x()
                    sliderMax = gr.right() - sliderLength + 1
                    if (self.layoutDirection() == Qt.RightToLeft):
                        opt.upsideDown = not opt.upsideDown
                else:
                    pos = event.pos().y()
                    sliderLength = sr.height()
                    sliderMin = gr.y()
                    sliderMax = gr.bottom() - sliderLength + 1
                # Map the pixel position into the scrollbar's value range.
                self.setValue(QStyle.sliderValueFromPosition(
                    self.minimum(), self.maximum(),
                    pos - sliderMin, sliderMax - sliderMin,
                    opt.upsideDown))
                return
        return super().mousePressEvent(event)
class TimeScrollBar(BaseScrollBar):
    """Horizontal scrollbar controlling the shown time range.

    In epochs-mode the value is an epoch index; otherwise the value is the
    time in seconds multiplied by ``step_factor`` (derived from the
    scroll-sensitivity setting).
    """
    def __init__(self, mne):
        super().__init__(Qt.Horizontal)
        self.mne = mne
        self.step_factor = 1
        self.setMinimum(0)
        self.setSingleStep(1)
        self.update_duration()
        self.setFocusPolicy(Qt.WheelFocus)
        # Because valueChanged is needed (captures every input to scrollbar,
        # not just sliderMoved), there has to be made a differentiation
        # between internal and external changes.
        self.external_change = False
        self.valueChanged.connect(self._time_changed)
    def _time_changed(self, value):
        """Apply a scrollbar-driven change to the plot's x-range."""
        if not self.external_change:
            if self.mne.is_epochs:
                # Convert Epoch index to time
                value = self.mne.boundary_times[int(value)]
            else:
                value /= self.step_factor
            self.mne.plt.setXRange(value, value + self.mne.duration,
                                   padding=0)
    def update_value(self, value):
        """Set the scrollbar from a time value (without touching the plot)."""
        # Mark change as external to avoid setting
        # XRange again in _time_changed.
        self.external_change = True
        if self.mne.is_epochs:
            set_value = np.searchsorted(self.mne.midpoints, value)
        else:
            set_value = int(value * self.step_factor)
        self.setValue(set_value)
        self.external_change = False
    def update_duration(self):
        """Recompute page-step, maximum and step_factor for a new duration."""
        if self.mne.is_epochs:
            self.setPageStep(self.mne.n_epochs)
            self.setMaximum(len(self.mne.inst) - self.mne.n_epochs)
        else:
            self.setPageStep(int(self.mne.duration))
            self.step_factor = self.mne.scroll_sensitivity / self.mne.duration
            self.setMaximum(int((self.mne.xmax - self.mne.duration)
                                * self.step_factor))
    def _update_scroll_sensitivity(self):
        """Re-derive step_factor after the sensitivity setting changed."""
        self.update_duration()
        self.update_value(self.value() / self.step_factor)
    def keyPressEvent(self, event):
        # Let main handle the keypress
        event.ignore()
class ChannelScrollBar(BaseScrollBar):
    """Vertical scrollbar controlling which channels (or selection) is shown."""
    def __init__(self, mne):
        super().__init__(Qt.Vertical)
        self.mne = mne
        self.setMinimum(0)
        self.setSingleStep(1)
        self.update_nchan()
        self.setFocusPolicy(Qt.WheelFocus)
        # Because valueChanged is needed (captures every input to scrollbar,
        # not just sliderMoved), there has to be made a differentiation
        # between internal and external changes.
        self.external_change = False
        self.valueChanged.connect(self._channel_changed)
    def _channel_changed(self, value):
        """Apply a scrollbar-driven change to the shown channels/selection."""
        if not self.external_change:
            if self.mne.fig_selection:
                # In selection-mode the value indexes the selection groups.
                label = list(self.mne.ch_selections.keys())[value]
                self.mne.fig_selection._chkbx_changed(label)
            elif not self.mne.butterfly:
                value = min(value, self.mne.ymax - self.mne.n_channels)
                self.mne.plt.setYRange(value, value + self.mne.n_channels + 1,
                                       padding=0)
    def update_value(self, value):
        """Set the scrollbar position without touching the plot."""
        # Mark change as external to avoid setting YRange again in
        # _channel_changed.
        self.external_change = True
        self.setValue(value)
        self.external_change = False
    def update_nchan(self):
        """Recompute page-step and maximum for a new number of channels."""
        if getattr(self.mne, 'group_by', None) in ['position', 'selection']:
            self.setPageStep(1)
            self.setMaximum(len(self.mne.ch_selections) - 1)
        else:
            self.setPageStep(self.mne.n_channels)
            self.setMaximum(self.mne.ymax - self.mne.n_channels - 1)
    def keyPressEvent(self, event):
        # Let main handle the keypress
        event.ignore()
class OverviewBar(QGraphicsView):
def __init__(self, main):
super().__init__(QGraphicsScene())
self.main = main
self.mne = main.mne
self.bg_img = None
self.bg_pxmp = None
self.bg_pxmp_item = None
# Set minimum Size to 1/10 of display size
min_h = int(QApplication.desktop().screenGeometry().height() / 10)
self.setMinimumSize(1, 1)
self.setFixedHeight(min_h)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.set_background()
# Initialize Graphics-Items
# Bad channels
self.bad_line_dict = dict()
self.update_bad_channels()
# Events
self.event_line_dict = dict()
self.update_events()
if self.mne.is_epochs:
# Epochs Lines
self.epoch_line_dict = dict()
self.update_epoch_lines()
self.bad_epoch_rect_dict = dict()
self.update_bad_epochs()
else:
# Annotations
self.annotations_rect_dict = dict()
self.update_annotations()
# VLine
self.v_line = None
self.update_vline()
# View Range
self.viewrange_rect = None
self.update_viewrange()
def update_epoch_lines(self):
epoch_line_pen = mkPen(color='k', width=1)
for t in self.mne.boundary_times[1:-1]:
top_left = self._mapFromData(t, 0)
bottom_right = self._mapFromData(t, len(self.mne.ch_order))
line = self.scene().addLine(QLineF(top_left, bottom_right),
epoch_line_pen)
line.setZValue(1)
self.epoch_line_dict[t] = line
def update_bad_channels(self):
bad_set = set(self.mne.info['bads'])
line_set = set(self.bad_line_dict.keys())
add_chs = bad_set.difference(line_set)
rm_chs = line_set.difference(bad_set)
for line_idx, ch_idx in enumerate(self.mne.ch_order):
ch_name = self.mne.ch_names[ch_idx]
if ch_name in add_chs:
start = self._mapFromData(0, line_idx)
stop = self._mapFromData(self.mne.inst.times[-1], line_idx)
pen = _get_color(self.mne.ch_color_bad)
line = self.scene().addLine(QLineF(start, stop), pen)
line.setZValue(2)
self.bad_line_dict[ch_name] = line
elif ch_name in rm_chs:
self.scene().removeItem(self.bad_line_dict[ch_name])
self.bad_line_dict.pop(ch_name)
def update_bad_epochs(self):
bad_set = set(self.mne.bad_epochs)
rect_set = set(self.bad_epoch_rect_dict.keys())
add_epos = bad_set.difference(rect_set)
rm_epos = rect_set.difference(bad_set)
for epo_num in self.mne.inst.selection:
if epo_num in add_epos:
epo_idx = self.mne.inst.selection.tolist().index(epo_num)
start, stop = self.mne.boundary_times[epo_idx:epo_idx + 2]
top_left = self._mapFromData(start, 0)
bottom_right = self._mapFromData(stop, len(self.mne.ch_order))
pen = _get_color(self.mne.epoch_color_bad)
rect = self.scene().addRect(QRectF(top_left, bottom_right),
pen=pen, brush=pen)
rect.setZValue(3)
self.bad_epoch_rect_dict[epo_num] = rect
elif epo_num in rm_epos:
self.scene().removeItem(self.bad_epoch_rect_dict[epo_num])
self.bad_epoch_rect_dict.pop(epo_num)
def update_events(self):
if getattr(self.mne, 'event_nums', None) is not None \
and self.mne.events_visible:
for ev_t, ev_id in zip(self.mne.event_times, self.mne.event_nums):
color_name = self.mne.event_color_dict[ev_id]
color = _get_color(color_name)
color.setAlpha(100)
pen = mkPen(color)
top_left = self._mapFromData(ev_t, 0)
bottom_right = self._mapFromData(ev_t, len(self.mne.ch_order))
line = self.scene().addLine(QLineF(top_left, bottom_right),
pen)
line.setZValue(1)
self.event_line_dict[ev_t] = line
else:
for event_line in self.event_line_dict.values():
self.scene().removeItem(event_line)
self.event_line_dict.clear()
def update_annotations(self):
annotations = self.mne.inst.annotations
# Exclude non-visible annotations
annot_set = set([annot['onset'] for annot in annotations if
self.mne.visible_annotations[annot['description']]])
rect_set = set(self.annotations_rect_dict.keys())
add_onsets = annot_set.difference(rect_set)
rm_onsets = rect_set.difference(annot_set)
# Add missing onsets
for add_onset in add_onsets:
plot_onset = _sync_onset(self.mne.inst, add_onset)
annot_idx = np.argwhere(self.mne.inst.annotations.onset
== add_onset)[0][0]
duration = annotations.duration[annot_idx]
description = annotations.description[annot_idx]
color_name = self.mne.annotation_segment_colors[description]
color = _get_color(color_name)
color.setAlpha(150)
pen = mkPen(color)
brush = mkBrush(color)
top_left = self._mapFromData(plot_onset, 0)
bottom_right = self._mapFromData(plot_onset + duration,
len(self.mne.ch_order))
rect = self.scene().addRect(QRectF(top_left, bottom_right),
pen, brush)
rect.setZValue(3)
self.annotations_rect_dict[add_onset] = {'rect': rect,
'plot_onset': plot_onset,
'duration': duration,
'color': color_name}
# Remove onsets
for rm_onset in rm_onsets:
self.scene().removeItem(self.annotations_rect_dict[rm_onset]
['rect'])
self.annotations_rect_dict.pop(rm_onset)
# Changes
for edit_onset in self.annotations_rect_dict:
plot_onset = _sync_onset(self.mne.inst, edit_onset)
annot_idx = np.where(annotations.onset == edit_onset)[0][0]
duration = annotations.duration[annot_idx]
rect_duration = self.annotations_rect_dict[edit_onset]['duration']
rect = self.annotations_rect_dict[edit_onset]['rect']
# Update changed duration
if duration != rect_duration:
self.annotations_rect_dict[edit_onset]['duration'] = duration
top_left = self._mapFromData(plot_onset, 0)
bottom_right = self._mapFromData(plot_onset + duration,
len(self.mne.ch_order))
rect.setRect(QRectF(top_left, bottom_right))
# Update changed color
description = annotations.description[annot_idx]
color_name = self.mne.annotation_segment_colors[description]
rect_color = self.annotations_rect_dict[edit_onset]['color']
if color_name != rect_color:
color = _get_color(color_name)
color.setAlpha(150)
pen = mkPen(color)
brush = mkBrush(color)
rect.setPen(pen)
rect.setBrush(brush)
def update_vline(self):
if self.mne.is_epochs:
# VLine representation not useful in epochs-mode
pass
# Add VLine-Representation
elif self.mne.vline is not None:
value = self.mne.vline.value()
top_left = self._mapFromData(value, 0)
bottom_right = self._mapFromData(value, len(self.mne.ch_order))
line = QLineF(top_left, bottom_right)
if self.v_line is None:
pen = mkPen('g')
self.v_line = self.scene().addLine(line, pen)
self.v_line.setZValue(1)
else:
self.v_line.setLine(line)
# Remove VLine-Representation
elif self.v_line is not None:
self.scene().removeItem(self.v_line)
self.v_line = None
def update_viewrange(self):
if self.mne.butterfly:
top_left = self._mapFromData(self.mne.t_start, 0)
bottom_right = self._mapFromData(self.mne.t_start +
self.mne.duration, self.mne.ymax)
else:
top_left = self._mapFromData(self.mne.t_start, self.mne.ch_start)
bottom_right = self._mapFromData(self.mne.t_start
+ self.mne.duration,
self.mne.ch_start
+ self.mne.n_channels)
rect = QRectF(top_left, bottom_right)
if self.viewrange_rect is None:
pen = mkPen(color='g')
brush = mkBrush(color=(0, 0, 0, 100))
self.viewrange_rect = self.scene().addRect(rect, pen, brush)
self.viewrange_rect.setZValue(4)
else:
self.viewrange_rect.setRect(rect)
def _set_range_from_pos(self, pos):
x, y = self._mapToData(pos)
# Set X
# Check boundaries
if self.mne.is_epochs:
if x == '-offbounds':
epo_idx = 0
elif x == '+offbounds':
epo_idx = len(self.mne.inst) - self.mne.n_epochs
else:
epo_idx = max(x - self.mne.n_epochs // 2, 0)
x = self.mne.boundary_times[epo_idx]
elif x == '-offbounds':
x = 0
elif x == '+offbounds':
x = self.mne.xmax - self.mne.duration
else:
# Move click position to middle of view range
x -= self.mne.duration / 2
xmin = np.clip(x, 0, self.mne.xmax - self.mne.duration)
xmax = np.clip(xmin + self.mne.duration,
self.mne.duration, self.mne.xmax)
self.mne.plt.setXRange(xmin, xmax, padding=0)
# Set Y
if y == '-offbounds':
y = 0
elif y == '+offbounds':
y = self.mne.ymax - (self.mne.n_channels + 1)
else:
# Move click position to middle of view range
y -= self.mne.n_channels / 2
ymin = np.clip(y, 0, self.mne.ymax - (self.mne.n_channels + 1))
ymax = np.clip(ymin + self.mne.n_channels + 1,
self.mne.n_channels, self.mne.ymax)
# Check boundaries
if self.mne.fig_selection:
self.mne.fig_selection._scroll_to_idx(int(ymin))
else:
self.mne.plt.setYRange(ymin, ymax, padding=0)
    def mousePressEvent(self, event):
        """Jump the main view to the clicked overview position."""
        self._set_range_from_pos(event.pos())
    def mouseMoveEvent(self, event):
        """Keep the view following the mouse while dragging on the overview."""
        self._set_range_from_pos(event.pos())
    def _fit_bg_img(self):
        """Rescale the background pixmap to the current widget size."""
        # Remove previous item from scene
        if (self.bg_pxmp_item is not None and
                self.bg_pxmp_item in self.scene().items()):
            self.scene().removeItem(self.bg_pxmp_item)
        # Resize Pixmap
        if self.bg_pxmp is not None:
            cnt_rect = self.contentsRect()
            # IgnoreAspectRatio: the pixmap always fills the whole widget.
            self.bg_pxmp = self.bg_pxmp.scaled(cnt_rect.width(),
                                               cnt_rect.height(),
                                               Qt.IgnoreAspectRatio)
            self.bg_pxmp_item = self.scene().addPixmap(self.bg_pxmp)
    def resizeEvent(self, event):
        """Rescale the scene and all graphics-items to the new widget size."""
        super().resizeEvent(event)
        cnt_rect = self.contentsRect()
        self.setSceneRect(QRectF(QPoint(0, 0),
                                 QPoint(cnt_rect.width(),
                                        cnt_rect.height())))
        # Resize background
        self._fit_bg_img()
        # Resize Graphics Items (assuming height never changes)
        # Resize bad_channels
        for bad_ch_line in self.bad_line_dict.values():
            current_line = bad_ch_line.line()
            bad_ch_line.setLine(QLineF(current_line.p1(),
                                       Point(cnt_rect.width(),
                                             current_line.y2())))
        # Resize event-lines
        for ev_t, event_line in self.event_line_dict.items():
            top_left = self._mapFromData(ev_t, 0)
            bottom_right = self._mapFromData(ev_t, len(self.mne.ch_order))
            event_line.setLine(QLineF(top_left, bottom_right))
        if self.mne.is_epochs:
            # Resize epoch lines
            for epo_t, epoch_line in self.epoch_line_dict.items():
                top_left = self._mapFromData(epo_t, 0)
                bottom_right = self._mapFromData(epo_t,
                                                 len(self.mne.ch_order))
                epoch_line.setLine(QLineF(top_left, bottom_right))
            # Resize bad rects
            for epo_idx, epoch_rect in self.bad_epoch_rect_dict.items():
                start, stop = self.mne.boundary_times[epo_idx:epo_idx + 2]
                top_left = self._mapFromData(start, 0)
                bottom_right = self._mapFromData(stop, len(self.mne.ch_order))
                epoch_rect.setRect(QRectF(top_left, bottom_right))
        else:
            # Resize annotation-rects
            for annot_dict in self.annotations_rect_dict.values():
                annot_rect = annot_dict['rect']
                plot_onset = annot_dict['plot_onset']
                duration = annot_dict['duration']
                top_left = self._mapFromData(plot_onset, 0)
                bottom_right = self._mapFromData(plot_onset + duration,
                                                 len(self.mne.ch_order))
                annot_rect.setRect(QRectF(top_left, bottom_right))
        # Update vline
        if all([i is not None for i in [self.v_line, self.mne.vline]]):
            value = self.mne.vline.value()
            top_left = self._mapFromData(value, 0)
            bottom_right = self._mapFromData(value, len(self.mne.ch_order))
            self.v_line.setLine(QLineF(top_left, bottom_right))
        # Update viewrange-rect
        # NOTE(review): assumes viewrange_rect was already created via
        # update_viewrange() before the first resize -- confirm against caller.
        top_left = self._mapFromData(self.mne.t_start, self.mne.ch_start)
        bottom_right = self._mapFromData(self.mne.t_start
                                         + self.mne.duration,
                                         self.mne.ch_start
                                         + self.mne.n_channels)
        self.viewrange_rect.setRect(QRectF(top_left, bottom_right))
    def set_background(self):
        """(Re-)create the background pixmap for the current overview mode."""
        # Add Overview-Pixmap
        if self.mne.overview_mode == 'empty':
            self.bg_pxmp = None
        elif self.mne.overview_mode == 'channels':
            # One row per displayed channel, colored by channel-type.
            channel_rgba = np.empty((len(self.mne.ch_order),
                                     2, 4))
            for line_idx, ch_idx in enumerate(self.mne.ch_order):
                ch_type = self.mne.ch_types[ch_idx]
                color = _get_color(self.mne.ch_color_dict[ch_type])
                channel_rgba[line_idx, :] = color.getRgb()
            # QImage expects a C-contiguous uint8 buffer.
            channel_rgba = np.require(channel_rgba, np.uint8, 'C')
            self.bg_img = QImage(channel_rgba,
                                 channel_rgba.shape[1],
                                 channel_rgba.shape[0],
                                 QImage.Format_RGBA8888)
            self.bg_pxmp = QPixmap.fromImage(self.bg_img)
        elif self.mne.overview_mode == 'zscore':
            # Precomputed z-score heatmap (RGBA uint8 array).
            self.bg_img = QImage(self.mne.zscore_rgba,
                                 self.mne.zscore_rgba.shape[1],
                                 self.mne.zscore_rgba.shape[0],
                                 QImage.Format_RGBA8888)
            self.bg_pxmp = QPixmap.fromImage(self.bg_img)
        self._fit_bg_img()
def _mapFromData(self, x, y):
# Include padding from black frame
point_x = self.width() * x / self.mne.xmax
point_y = self.height() * y / len(self.mne.ch_order)
return Point(point_x, point_y)
def _mapToData(self, point):
# Include padding from black frame
xnorm = point.x() / self.width()
if xnorm < 0:
x = '-offbounds'
elif xnorm > 1:
x = '+offbounds'
else:
if self.mne.is_epochs:
# Return epoch index for epochs
x = int(len(self.mne.inst) * xnorm)
else:
time_idx = int((len(self.mne.inst.times) - 1) * xnorm)
x = self.mne.inst.times[time_idx]
ynorm = point.y() / self.height()
if ynorm < 0:
y = '-offbounds'
elif ynorm > 1:
y = '+offbounds'
else:
y = len(self.mne.ch_order) * ynorm
return x, y
    def keyPressEvent(self, event):
        """Forward key-presses to the main window."""
        self.main.keyPressEvent(event)
class RawViewBox(ViewBox):
    """ViewBox of the main plot; handles annotation-dragging and clicks."""
    def __init__(self, main):
        super().__init__(invertY=True)
        self.enableAutoRange(enable=False, x=False, y=False)
        self.main = main
        self.mne = main.mne
        # State of an ongoing annotation-drag (None while not dragging).
        self._drag_start = None
        self._drag_region = None
    def mouseDragEvent(self, event, axis=None):
        """Create/resize an annotation-region by dragging (annotation mode)."""
        event.accept()
        if event.button() == Qt.LeftButton \
                and self.mne.annotation_mode:
            if self.mne.current_description:
                description = self.mne.current_description
                if event.isStart():
                    # Start a new region between press- and current position.
                    self._drag_start = self.mapSceneToView(
                        event.lastScenePos()).x()
                    drag_stop = self.mapSceneToView(event.scenePos()).x()
                    self._drag_region = AnnotRegion(self.mne,
                                                    description=description,
                                                    values=(self._drag_start,
                                                            drag_stop))
                    self.mne.plt.addItem(self._drag_region)
                    self.mne.plt.addItem(self._drag_region.label_item)
                elif event.isFinish():
                    # Finalize the region and merge it into the annotations.
                    drag_stop = self.mapSceneToView(event.scenePos()).x()
                    self._drag_region.setRegion((self._drag_start, drag_stop))
                    plot_onset = min(self._drag_start, drag_stop)
                    plot_offset = max(self._drag_start, drag_stop)
                    duration = abs(self._drag_start - drag_stop)
                    # Add to annotations
                    onset = _sync_onset(self.mne.inst, plot_onset,
                                        inverse=True)
                    _merge_annotations(onset, onset + duration,
                                       self.mne.current_description,
                                       self.mne.inst.annotations)
                    # Add to regions/merge regions
                    merge_values = [plot_onset, plot_offset]
                    rm_regions = list()
                    for region in [r for r in self.mne.regions
                                   if r.description ==
                                   self.mne.current_description]:
                        values = region.getRegion()
                        if any([plot_onset < val < plot_offset for val in
                                values]):
                            merge_values += values
                            rm_regions.append(region)
                    if len(merge_values) > 2:
                        self._drag_region.setRegion((min(merge_values),
                                                     max(merge_values)))
                    for rm_region in rm_regions:
                        self.main._remove_region(rm_region, from_annot=False)
                    self.main._add_region(plot_onset, duration,
                                          self.mne.current_description,
                                          self._drag_region)
                    self._drag_region.select(True)
                    # Update Overview-Bar
                    self.mne.overview_bar.update_annotations()
                else:
                    # Drag in progress: just track the mouse.
                    x_to = self.mapSceneToView(event.scenePos()).x()
                    self._drag_region.setRegion((self._drag_start, x_to))
            elif event.isFinish():
                # No description selected yet -- nothing to annotate.
                self.main.message_box(text='No description!',
                                      info_text='No description is given, '
                                                'add one!',
                                      icon=QMessageBox.Warning)
    def mouseClickEvent(self, event):
        """Place (left) or remove (right) the vertical guide-line."""
        # If we want the context-menu back, uncomment following line
        # super().mouseClickEvent(event)
        if not self.mne.annotation_mode:
            if event.button() == Qt.LeftButton:
                self.main._add_vline(self.mapSceneToView(
                    event.scenePos()).x())
            elif event.button() == Qt.RightButton:
                self.main._remove_vline()
    def wheelEvent(self, ev, axis=None):
        """Scroll horizontally/vertically depending on wheel orientation."""
        ev.accept()
        # One wheel-notch (delta 120) scrolls one step.
        scroll = -1 * ev.delta() / 120
        if ev.orientation() == Qt.Horizontal:
            self.main.hscroll(scroll * 10)
        elif ev.orientation() == Qt.Vertical:
            self.main.vscroll(scroll)
    def keyPressEvent(self, event):
        """Forward key-presses to the main window."""
        self.main.keyPressEvent(event)
class VLineLabel(InfLineLabel):
    """Draggable label of the vertical guide-line, showing its time."""
    def __init__(self, vline):
        super().__init__(vline, text='{value:.3f} s', position=0.98,
                         fill='g', color='b', movable=True)
        # Offset between line-position and mouse-press (set on drag-start).
        self.cursorOffset = None
    def mouseDragEvent(self, ev):
        """Move the attached line along with a drag on the label."""
        if self.movable and ev.button() == Qt.LeftButton:
            if ev.isStart():
                self.line.moving = True
                self.cursorOffset = (self.line.pos() -
                                     self.mapToView(ev.buttonDownPos()))
            ev.accept()
            if not self.line.moving:
                return
            self.line.setPos(self.cursorOffset + self.mapToView(ev.pos()))
            self.line.sigDragged.emit(self)
            if ev.isFinish():
                self.line.moving = False
                self.line.sigPositionChangeFinished.emit(self.line)
    def valueChanged(self):
        """Update the displayed time after the line moved."""
        if not self.isVisible():
            return
        value = self.line.value()
        if self.line.mne.is_epochs:
            # Show epoch-time
            # Map the absolute x-position to the time inside the epoch.
            t_vals_abs = np.linspace(0, self.line.mne.epoch_dur,
                                     len(self.line.mne.inst.times))
            search_val = value % self.line.mne.epoch_dur
            t_idx = np.searchsorted(t_vals_abs, search_val)
            value = self.line.mne.inst.times[t_idx]
        self.setText(self.format.format(value=value))
        self.updatePosition()
class VLine(InfiniteLine):
    """Movable vertical guide-line with an attached time-label."""
    def __init__(self, mne, pos, bounds):
        super().__init__(pos, pen='g', hoverPen='y',
                         movable=True, bounds=bounds)
        self.mne = mne
        self.label = VLineLabel(self)
class EventLine(InfiniteLine):
    """Immovable vertical line marking an event, labeled with its id."""
    def __init__(self, pos, id, color):
        super().__init__(pos, pen=color, movable=False,
                         label=str(id), labelOpts={'position': 0.98,
                                                   'color': color,
                                                   'anchors': [(0, 0.5),
                                                               (0, 0.5)]})
        self.label.setFont(QFont('AnyStyle', 10, QFont.Bold))
        # Draw behind traces and regions.
        self.setZValue(0)
class Crosshair(InfiniteLine):
    """Vertical green line with an additional red marker-point.

    The marker is drawn at the stored y-position on top of the line.
    """

    def __init__(self):
        super().__init__(angle=90, movable=False, pen='g')
        # y-position of the red marker point (updated via set_data).
        self.y = 1

    def set_data(self, x, y):
        """Move the line to *x* and remember *y* for the marker."""
        self.setPos(x)
        self.y = y

    def paint(self, p, *args):
        """Paint the line first, then the red marker on top."""
        super().paint(p, *args)
        marker_pen = mkPen('r', width=4)
        p.setPen(marker_pen)
        p.drawPoint(Point(self.y, 0))
class BaseScaleBar:
    """Mixin with shared positioning logic for scalebar items.

    Subclasses combine this with a graphics item and override
    ``_set_position`` to actually place the item.
    """

    def __init__(self, mne, ch_type):
        self.mne = mne
        self.ch_type = ch_type
        # y-position (channel line) of the bar; computed lazily.
        self.ypos = None

    def _set_position(self, x, y):
        """Place the item at data coordinates (x, y); overridden by subclass."""
        pass

    def _is_visible(self):
        """Return True if a channel of this type is currently displayed."""
        shown_types = self.mne.ch_types[self.mne.picks]
        return self.ch_type in shown_types

    def _get_ypos(self):
        """Determine ypos from the first good channel of this type."""
        if self.mne.butterfly:
            self.ypos = self.mne.butterfly_type_order.index(self.ch_type) + 1
            return
        type_idxs = np.where(self.mne.ch_types[self.mne.picks]
                             == self.ch_type)[0]
        for idx in type_idxs:
            name = self.mne.ch_names[self.mne.picks[idx]]
            is_bad = (name in self.mne.info['bads'] or
                      name in self.mne.whitened_ch_names)
            if not is_bad:
                self.ypos = self.mne.ch_start + idx + 1
                break
        else:
            # All channels of this type are bad/whitened -> fall back to
            # the first one (only if no position was set before).
            if self.ypos is None:
                self.ypos = self.mne.ch_start + type_idxs[0] + 1

    def update_x_position(self):
        """Update only the horizontal position (e.g. after hscroll)."""
        if self._is_visible():
            if self.ypos is None:
                self._get_ypos()
            self._set_position(self.mne.t_start, self.ypos)

    def update_y_position(self):
        """Recompute the vertical position and toggle visibility."""
        visible = self._is_visible()
        self.setVisible(visible)
        if visible:
            self._get_ypos()
            self._set_position(self.mne.t_start, self.ypos)
class ScaleBarText(BaseScaleBar, TextItem):
    """Text-label showing the data-value represented by one channel-spacing."""

    def __init__(self, mne, ch_type):
        BaseScaleBar.__init__(self, mne, ch_type)
        # NOTE(review): the color-string was truncated in the source
        # ("color='"), leaving a syntax error; restored to '#AA3377'
        # (the scalebar color used by ScaleBar) -- confirm against upstream.
        TextItem.__init__(self, color='#AA3377')

        self.setFont(QFont('AnyStyle', 10))
        self.setZValue(2)  # To draw over RawTraceItems

        self.update_value()
        self.update_y_position()

    def update_value(self):
        """Set the text to the amplitude covered by the scalebar."""
        # One channel-spacing corresponds to 2 scalebar-halves, except in
        # butterfly-mode where traces are not offset.
        scaler = 1 if self.mne.butterfly else 2
        inv_norm = (scaler *
                    self.mne.scalings[self.ch_type] *
                    self.mne.unit_scalings[self.ch_type] /
                    self.mne.scale_factor)
        self.setText(f'{_simplify_float(inv_norm)} '
                     f'{self.mne.units[self.ch_type]}')

    def _set_position(self, x, y):
        """Place the label at data coordinates (x, y)."""
        self.setPos(x, y)
class ScaleBar(BaseScaleBar, QGraphicsLineItem):
    """Vertical line indicating the amplitude scale of the data."""

    def __init__(self, mne, ch_type):
        BaseScaleBar.__init__(self, mne, ch_type)
        QGraphicsLineItem.__init__(self)

        self.setZValue(1)
        # NOTE(review): the color-string was truncated in the source
        # ("color='"), leaving a syntax error; restored to '#AA3377'
        # to match ScaleBarText -- confirm against upstream.
        self.setPen(mkPen(color='#AA3377'))
        self.update_y_position()

    def _set_position(self, x, y):
        """Draw the bar one channel-spacing tall, centered on channel y."""
        self.setLine(QLineF(x, y - 0.5, x, y + 0.5))

    def get_ydata(self):
        """Return the y-coordinates of the bar's endpoints."""
        line = self.line()
        return line.y1(), line.y2()
class _BaseDialog(QDialog):
    """Base class for child-dialogs; handles (de-)registration on mne."""
    def __init__(self, main, widget=None,
                 modal=False, name=None, title=None):
        super().__init__(main)
        self.main = main
        self.widget = widget
        self.mne = main.mne
        self.name = name
        self.modal = modal
        # Ensure the dialog is destroyed (and deregistered) when closed.
        self.setAttribute(Qt.WA_DeleteOnClose, True)
        self.mne.child_figs.append(self)
        if self.name is not None:
            # Make the dialog reachable as an attribute on mne.
            setattr(self.mne, self.name, self)
        if title is not None:
            self.setWindowTitle(title)
        if self.widget is not None:
            layout = QVBoxLayout()
            layout.addWidget(self.widget)
            self.setLayout(layout)
    def show(self, center=True):
        """Show the dialog (modally if requested), optionally centered."""
        if self.modal:
            self.open()
        else:
            super().show()
        if center:
            # center dialog
            qr = self.frameGeometry()
            cp = QDesktopWidget().availableGeometry().center()
            qr.moveCenter(cp)
            self.move(qr.topLeft())
    def keyPressEvent(self, event):
        """Close on Escape; forward all other keys to the parent window."""
        if event.key() == Qt.Key_Escape:
            self.close()
        else:
            self.parent().keyPressEvent(event)
    def closeEvent(self, event):
        """Deregister this dialog from mne before it is destroyed."""
        # hasattr-guards protect against close during partial initialization.
        if hasattr(self, 'name') and hasattr(self, 'mne'):
            if self.name is not None and hasattr(self.mne, self.name):
                setattr(self.mne, self.name, None)
            if self in self.mne.child_figs:
                self.mne.child_figs.remove(self)
        event.accept()
class SettingsDialog(_BaseDialog):
    """Dialog with browser settings (downsampling, scroll sensitivity)."""

    def __init__(self, main, **kwargs):
        super().__init__(main, **kwargs)

        layout = QFormLayout()

        self.downsampling_box = QSpinBox()
        self.downsampling_box.setToolTip('Set an integer as the downsampling'
                                         ' factor or "Auto" to get the factor'
                                         ' from the visible range.\n'
                                         ' Setting the factor 1 means no '
                                         'downsampling.\n'
                                         ' Default is 1.')
        # 0 is displayed as the special value "Auto".
        self.downsampling_box.setMinimum(0)
        self.downsampling_box.setSpecialValueText('Auto')
        self.downsampling_box.valueChanged.connect(partial(
            self._value_changed, value_name='downsampling'))
        self.downsampling_box.setValue(0 if self.mne.downsampling == 'auto'
                                       else self.mne.downsampling)
        layout.addRow('downsampling', self.downsampling_box)

        self.ds_method_cmbx = QComboBox()
        self.ds_method_cmbx.setToolTip(
            '<h2>Downsampling Method</h2>'
            '<ul>'
            '<li>subsample:<br>'
            'Only take every n-th sample.</li>'
            '<li>mean:<br>'
            'Take the mean of n samples.</li>'
            '<li>peak:<br>'
            'Draws a saw wave from the minimum to the maximum from a '
            'collection of n samples.</li>'
            '</ul>'
            '<i>(Those methods are adapted from '
            'pyqtgraph)</i><br>'
            'Default is "peak".')
        self.ds_method_cmbx.addItems(['subsample', 'mean', 'peak'])
        self.ds_method_cmbx.currentTextChanged.connect(partial(
            self._value_changed, value_name='ds_method'))
        self.ds_method_cmbx.setCurrentText(
            self.mne.ds_method)
        layout.addRow('ds_method', self.ds_method_cmbx)

        self.scroll_sensitivity_slider = QSlider(Qt.Horizontal)
        self.scroll_sensitivity_slider.setMinimum(10)
        self.scroll_sensitivity_slider.setMaximum(1000)
        self.scroll_sensitivity_slider.setToolTip('Set the sensitivity of '
                                                  'the scrolling in '
                                                  'horizontal direction.')
        self.scroll_sensitivity_slider.valueChanged.connect(partial(
            self._value_changed, value_name='scroll_sensitivity'))
        # Set default
        self.scroll_sensitivity_slider.setValue(self.mne.scroll_sensitivity)
        layout.addRow('horizontal scroll sensitivity',
                      self.scroll_sensitivity_slider)

        self.setLayout(layout)
        self.show()

    def closeEvent(self, event):
        """Disconnect widget-signals, then run the base-class cleanup.

        Fixes the original ``super.closeEvent()`` (missing call-parentheses
        on ``super`` and missing ``event`` argument), which raised
        AttributeError when the dialog was closed. Also disconnects the
        downsampling-box signal, which was connected but never disconnected.
        """
        _disconnect(self.downsampling_box.valueChanged)
        _disconnect(self.ds_method_cmbx.currentTextChanged)
        _disconnect(self.scroll_sensitivity_slider.valueChanged)
        super().closeEvent(event)

    def _value_changed(self, new_value, value_name):
        """Write the changed setting back to mne and update the browser."""
        if value_name == 'downsampling' and new_value == 0:
            # SpinBox value 0 means "auto".
            new_value = 'auto'

        setattr(self.mne, value_name, new_value)

        if value_name == 'scroll_sensitivity':
            self.mne.ax_hscroll._update_scroll_sensitivity()
        else:
            self.main._redraw()
class HelpDialog(_BaseDialog):
    """Dialog listing keyboard-shortcuts and mouse-interactions."""
    def __init__(self, main, **kwargs):
        super().__init__(main, **kwargs)
        # Show all keyboard-shortcuts in a Scroll-Area
        layout = QVBoxLayout()
        keyboard_label = QLabel('Keyboard Shortcuts')
        keyboard_label.setFont(QFont('AnyStyle', 16, QFont.Bold))
        layout.addWidget(keyboard_label)
        scroll_area = QScrollArea()
        scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll_area.setSizePolicy(QSizePolicy.MinimumExpanding,
                                  QSizePolicy.MinimumExpanding)
        scroll_widget = QWidget()
        form_layout = QFormLayout()
        for key in main.mne.keyboard_shortcuts:
            key_dict = main.mne.keyboard_shortcuts[key]
            if 'description' in key_dict:
                if 'alias' in key_dict:
                    key = key_dict['alias']
                for idx, key_des in enumerate(key_dict['description']):
                    key_name = key
                    if 'modifier' in key_dict:
                        # Show modifier (e.g. "Shift + x") if defined.
                        mod = key_dict['modifier'][idx]
                        if mod is not None:
                            key_name = mod + ' + ' + key_name
                    form_layout.addRow(key_name, QLabel(key_des))
        scroll_widget.setLayout(form_layout)
        scroll_area.setWidget(scroll_widget)
        layout.addWidget(scroll_area)
        # Additional help for mouse interaction
        # Texts depend on the displayed instance type (raw/epochs/ica).
        inst = self.main.mne.instance_type
        is_raw = inst == 'raw'
        is_epo = inst == 'epochs'
        is_ica = inst == 'ica'
        ch_cmp = 'component' if is_ica else 'channel'
        ch_epo = 'epoch' if is_epo else 'channel'
        ica_bad = 'Mark/unmark component for exclusion'
        lclick_data = ica_bad if is_ica else f'Mark/unmark bad {ch_epo}'
        lclick_name = (ica_bad if is_ica else 'Mark/unmark bad channel')
        ldrag = 'add annotation (in annotation mode)' if is_raw else None
        rclick_name = dict(ica='Show diagnostics for component',
                           epochs='Show imageplot for channel',
                           raw='Show channel location')[inst]
        mouse_help = [(f'Left-click {ch_cmp} name', lclick_name),
                      (f'Left-click {ch_cmp} data', lclick_data),
                      ('Left-click-and-drag on plot', ldrag),
                      ('Left-click on plot background',
                       'Place vertical guide'),
                      ('Right-click on plot background',
                       'Clear vertical guide'),
                      ('Right-click on channel name', rclick_name)]
        mouse_label = QLabel('Mouse Interaction')
        mouse_label.setFont(QFont('AnyStyle', 16, QFont.Bold))
        layout.addWidget(mouse_label)
        mouse_widget = QWidget()
        mouse_layout = QFormLayout()
        for interaction, description in mouse_help:
            if description is not None:
                mouse_layout.addRow(f'{interaction}:', QLabel(description))
        mouse_widget.setLayout(mouse_layout)
        layout.addWidget(mouse_widget)
        self.setLayout(layout)
        self.show()
        # Set minimum width to avoid horizontal scrolling
        scroll_area.setMinimumWidth(scroll_widget.minimumSizeHint().width() +
                                    scroll_area.verticalScrollBar().width())
        self.update()
class ProjDialog(_BaseDialog):
    """Dialog to toggle SSP-projectors on/off via checkboxes."""
    def __init__(self, main, **kwargs):
        self.external_change = True
        # Create projection-layout
        super().__init__(main, **kwargs)
        layout = QVBoxLayout()
        labels = [p['desc'] for p in self.mne.projs]
        for ix, active in enumerate(self.mne.projs_active):
            if active:
                labels[ix] += ' (already applied)'
        # make title
        layout.addWidget(QLabel('Mark projectors applied on the plot.\n'
                                '(Applied projectors are dimmed).'))
        # Add checkboxes
        self.checkboxes = list()
        for idx, label in enumerate(labels):
            chkbx = QCheckBox(label)
            chkbx.setChecked(bool(self.mne.projs_on[idx]))
            chkbx.clicked.connect(partial(self._proj_changed, idx=idx))
            if self.mne.projs_active[idx]:
                # Already-applied projectors cannot be toggled anymore.
                chkbx.setEnabled(False)
            self.checkboxes.append(chkbx)
            layout.addWidget(chkbx)
        self.toggle_all_bt = QPushButton('Toggle All')
        self.toggle_all_bt.clicked.connect(self.toggle_all)
        layout.addWidget(self.toggle_all_bt)
        self.setLayout(layout)
        self.show()
    def _proj_changed(self, state, idx):
        """Apply a single checkbox-toggle to the projectors."""
        # Only change if proj wasn't already applied.
        if not self.mne.projs_active[idx]:
            self.mne.projs_on[idx] = state
            self.main._apply_update_projectors()
    def toggle_all(self):
        """Toggle all projectors at once and sync the checkboxes."""
        self.main._apply_update_projectors(toggle_all=True)
        # Update all checkboxes
        for idx, chkbx in enumerate(self.checkboxes):
            chkbx.setChecked(bool(self.mne.projs_on[idx]))
class _ChannelFig(FigureCanvasQTAgg):
    """Matplotlib sensor-plot canvas with a Qt-drawn lasso overlay."""
    def __init__(self, figure):
        self.figure = figure
        super().__init__(figure)
        self.setFocusPolicy(Qt.StrongFocus | Qt.WheelFocus)
        self.setFocus()
        # Path of the lasso-selection while the mouse is dragged.
        self._lasso_path = None
        # Only receive move-events while a mouse-button is pressed.
        self.setMouseTracking(False)
    def paintEvent(self, event):
        """Paint the canvas, then draw the lasso-path on top of it
        in Qt."""
        super().paintEvent(event)
        if self._lasso_path is not None:
            painter = QPainter(self)
            painter.setPen(mkPen('red', width=2))
            painter.drawPath(self._lasso_path)
            painter.end()
    def mouseMoveEvent(self, event):
        """Extend the lasso-path while dragging and trigger a repaint."""
        super().mouseMoveEvent(event)
        if self._lasso_path is None:
            self._lasso_path = QPainterPath()
            self._lasso_path.moveTo(event.pos())
        else:
            self._lasso_path.lineTo(event.pos())
        self.update()
    def mouseReleaseEvent(self, event):
        """Clear the lasso-path when the drag ends."""
        super().mouseReleaseEvent(event)
        self._lasso_path = None
        self.update()
    def keyPressEvent(self, event):
        """Let key-presses propagate to the enclosing dialog."""
        event.ignore()
class SelectionDialog(_BaseDialog):
    """Dialog to switch between predefined/custom channel-selections."""
    def __init__(self, main):
        # Create widget
        super().__init__(main, name='fig_selection',
                         title='Channel selection')
        # Place the dialog at the right edge of the screen.
        xpos = QApplication.desktop().screenGeometry().width() - 400
        self.setGeometry(xpos, 100, 400, 800)
        layout = QVBoxLayout()
        # Add channel plot
        fig = _figure_agg(figsize=(6, 6), dpi=96)
        ax = fig.add_axes([0, 0, 1, 1])
        self.channel_fig = plot_sensors(self.mne.info, kind='select',
                                        ch_type='all', title='',
                                        ch_groups=self.mne.group_by, axes=ax,
                                        show=False)[0]
        if hasattr(self.channel_fig.lasso, 'callbacks'):
            # MNE >= 1.0
            self.channel_fig.lasso.callbacks.append(self._set_custom_selection)
        else:
            # MNE <= 0.24
            self.channel_fig.canvas.mpl_connect(
                'lasso_event', self._set_custom_selection)
        self.channel_widget = _ChannelFig(self.channel_fig)
        layout.addWidget(self.channel_widget)
        selections_dict = self.mne.ch_selections
        selections_dict.update(Custom=np.array([], dtype=int)) # for lasso
        self.chkbxs = OrderedDict()
        for label in selections_dict:
            chkbx = QCheckBox(label)
            chkbx.clicked.connect(partial(self._chkbx_changed, label))
            self.chkbxs[label] = chkbx
            layout.addWidget(chkbx)
        self.mne.old_selection = list(selections_dict.keys())[0]
        self.chkbxs[self.mne.old_selection].setChecked(True)
        self._update_highlighted_sensors()
        # add instructions at bottom
        instructions = (
            'To use a custom selection, first click-drag on the sensor plot '
            'to "lasso" the sensors you want to select, or hold Ctrl while '
            'clicking individual sensors. Holding Ctrl while click-dragging '
            'allows a lasso selection adding to (rather than replacing) the '
            'existing selection.')
        help_widget = QTextEdit(instructions)
        help_widget.setReadOnly(True)
        layout.addWidget(help_widget)
        self.setLayout(layout)
        self.show(center=False)
    def _chkbx_changed(self, label):
        """Activate the selection *label* and update view and widgets."""
        # Disable butterfly if checkbox is clicked
        if self.mne.butterfly:
            self.main._set_butterfly(False)
        # Disable other checkboxes
        for chkbx in self.chkbxs.values():
            chkbx.setChecked(False)
        # An empty custom selection falls back to the previous selection.
        if (label == 'Custom' and
                not len(self.mne.ch_selections['Custom'])):
            label = self.mne.old_selection
        # Select the checkbox no matter if clicked on when active or not
        self.chkbxs[label].setChecked(True)
        # Update selections
        self.mne.old_selection = label
        self.mne.picks = np.asarray(self.mne.ch_selections[label])
        self.mne.n_channels = len(self.mne.picks)
        # Update highlighted sensors
        self._update_highlighted_sensors()
        # if "Vertex" is defined, some channels appear twice, so if
        # "Vertex" is selected, ch_start should be the *first* match;
        # otherwise it should be the *last* match (since "Vertex" is
        # always the first selection group, if it exists).
        if label == 'Custom':
            self.mne.ch_start = 0
        else:
            all_values = list()
            for key, chs in self.mne.ch_selections.items():
                if np.array_equal(chs, self.mne.picks):
                    self.mne.ch_start = len(all_values)
                    break
                else:
                    all_values = np.concatenate([all_values, chs])
        # Apply changes on view
        self.mne.plt.setYRange(self.mne.ch_start,
                               self.mne.ch_start + self.mne.n_channels + 1,
                               padding=0)
        # Update scrollbar
        label_idx = list(self.mne.ch_selections.keys()).index(label)
        self.mne.ax_vscroll.update_value(label_idx)
        # Update all y-positions, because channels can appear in multiple
        # selections on different y-positions
        for trace in self.mne.traces:
            trace.update_ypos()
            trace.update_data()
    def _set_custom_selection(self):
        """Store the lasso-selected channels as the 'Custom' selection."""
        chs = self.channel_fig.lasso.selection
        inds = np.in1d(self.mne.ch_names, chs)
        self.mne.ch_selections['Custom'] = inds.nonzero()[0]
        if any(inds):
            self._chkbx_changed('Custom')
    def _update_highlighted_sensors(self):
        """Highlight the sensors of the currently picked channels."""
        inds = np.in1d(self.mne.fig_selection.channel_fig.lasso.ch_names,
                       self.mne.ch_names[self.mne.picks]).nonzero()[0]
        self.channel_fig.lasso.select_many(inds)
        self.channel_widget.draw()
    def _update_bad_sensors(self, pick, mark_bad):
        """Color the sensor of channel *pick* according to its bad-state."""
        sensor_picks = list()
        ch_indices = channel_indices_by_type(self.mne.info)
        for this_type in _DATA_CH_TYPES_SPLIT:
            if this_type in self.mne.ch_types:
                sensor_picks.extend(ch_indices[this_type])
        sensor_idx = np.in1d(sensor_picks, pick).nonzero()[0]
        # change the sensor color
        fig = self.channel_fig
        fig.lasso.ec[sensor_idx, 0] = float(mark_bad) # change R of RGBA array
        fig.lasso.collection.set_edgecolors(fig.lasso.ec)
        fig.canvas.draw_idle()
        self.channel_widget.draw()
    def _style_butterfly(self):
        """Sync checkbox-states and highlighting with butterfly-mode."""
        for key, chkbx in self.chkbxs.items():
            if self.mne.butterfly:
                chkbx.setChecked(False)
            else:
                if key == self.mne.old_selection:
                    chkbx.setChecked(True)
        self._update_highlighted_sensors()
    def _scroll_selection(self, step):
        """Move *step* selections forward/backward in the selection list."""
        name_idx = list(self.mne.ch_selections.keys()).index(
            self.mne.old_selection)
        new_idx = np.clip(name_idx + step,
                          0, len(self.mne.ch_selections) - 1)
        new_label = list(self.mne.ch_selections.keys())[new_idx]
        self._chkbx_changed(new_label)
    def _scroll_to_idx(self, idx):
        """Activate the selection containing overall channel-position idx."""
        all_values = list()
        label = list(self.mne.ch_selections.keys())[0]
        for key, values in self.mne.ch_selections.items():
            all_values = np.concatenate([all_values, values])
            if idx < len(all_values):
                label = key
                break
        self._chkbx_changed(label)
    def closeEvent(self, event):
        """Disconnect callbacks and close the main window as well."""
        super().closeEvent(event)
        if hasattr(self.channel_fig.lasso, 'callbacks'):
            # MNE >= 1.0
            self.channel_fig.lasso.callbacks.clear()
        for chkbx in self.chkbxs.values():
            _disconnect(chkbx.clicked)
        if hasattr(self, 'main'):
            self.main.close()
class AnnotRegion(LinearRegionItem):
    """Graphics-item representing one annotation in the plot."""
    # Emitted when the region was moved/resized (object = this region).
    regionChangeFinished = pyqtSignal(object)
    # Emitted when the region got selected by a click.
    gotSelected = pyqtSignal(object)
    # Emitted when removal of this region was requested.
    removeRequested = pyqtSignal(object)
    def __init__(self, mne, description, values):
        super().__init__(values=values, orientation='vertical',
                         movable=True, swapMode='sort',
                         bounds=(0, mne.xmax))
        # Set default z-value to 0 to be behind other items in scene
        self.setZValue(0)
        self.sigRegionChangeFinished.connect(self._region_changed)
        self.mne = mne
        self.description = description
        # Onset before the last move (used to find the matching annotation).
        self.old_onset = values[0]
        self.selected = False
        self.label_item = TextItem(text=description, anchor=(0.5, 0.5))
        self.label_item.setFont(QFont('AnyStyle', 10, QFont.Bold))
        self.sigRegionChanged.connect(self.update_label_pos)
        self.update_color()
    def _region_changed(self):
        """Re-emit region changes and remember the previous onset."""
        self.regionChangeFinished.emit(self)
        self.old_onset = self.getRegion()[0]
    def update_color(self):
        """Derive fill/hover/text colors from the description's base color."""
        color_string = self.mne.annotation_segment_colors[self.description]
        self.base_color = _get_color(color_string)
        self.hover_color = _get_color(color_string)
        self.text_color = _get_color(color_string)
        # Same hue, increasing opacity for fill < hover < text.
        self.base_color.setAlpha(75)
        self.hover_color.setAlpha(150)
        self.text_color.setAlpha(255)
        self.line_pen = mkPen(color=self.hover_color, width=2)
        self.hover_pen = mkPen(color=self.text_color, width=2)
        self.setBrush(self.base_color)
        self.setHoverBrush(self.hover_color)
        self.label_item.setColor(self.text_color)
        for line in self.lines:
            line.setPen(self.line_pen)
            line.setHoverPen(self.hover_pen)
        self.update()
    def update_description(self, description):
        """Set a new description and refresh the label."""
        self.description = description
        self.label_item.setText(description)
        self.label_item.update()
    def update_visible(self, visible):
        """Show/hide the region together with its label."""
        self.setVisible(visible)
        self.label_item.setVisible(visible)
    def remove(self):
        """Request removal of this region and detach its label."""
        self.removeRequested.emit(self)
        vb = self.mne.viewbox
        if vb and self.label_item in vb.addedItems:
            vb.removeItem(self.label_item)
    def select(self, selected):
        """Mark the region as (de-)selected and restyle its label."""
        self.selected = selected
        if selected:
            self.label_item.setColor('w')
            self.label_item.fill = mkBrush(self.hover_color)
            self.gotSelected.emit(self)
        else:
            self.label_item.setColor(self.text_color)
            self.label_item.fill = mkBrush(None)
        self.label_item.update()
    def mouseClickEvent(self, event):
        """Select on left-click, remove on right-click (annotation mode)."""
        if self.mne.annotation_mode:
            if event.button() == Qt.LeftButton and self.movable:
                self.select(True)
                event.accept()
            elif event.button() == Qt.RightButton and self.movable:
                self.remove()
                # Propagate remove request to lower annotations if overlapping
                event.ignore()
        else:
            event.ignore()
    def update_label_pos(self):
        """Keep the label horizontally centered near the top of the view."""
        rgn = self.getRegion()
        vb = self.mne.viewbox
        if vb:
            ymax = vb.viewRange()[1][1]
            self.label_item.setPos(sum(rgn) / 2, ymax - 0.3)
class _AnnotEditDialog(_BaseDialog):
    """Dialog to rename all or only the selected annotation(s)."""
    def __init__(self, annot_dock):
        super().__init__(annot_dock.main, title='Edit Annotations')
        self.ad = annot_dock
        # 'all' or 'selected'; None when no region is selected.
        self.current_mode = None
        layout = QVBoxLayout()
        self.descr_label = QLabel()
        if self.mne.selected_region:
            # Only offer a scope-choice when a region is selected.
            self.mode_cmbx = QComboBox()
            self.mode_cmbx.addItems(['all', 'selected'])
            self.mode_cmbx.currentTextChanged.connect(self._mode_changed)
            layout.addWidget(QLabel('Edit Scope:'))
            layout.addWidget(self.mode_cmbx)
        # Set group as default
        self._mode_changed('all')
        layout.addWidget(self.descr_label)
        self.input_w = QLineEdit()
        layout.addWidget(self.input_w)
        bt_layout = QHBoxLayout()
        ok_bt = QPushButton('Ok')
        ok_bt.clicked.connect(self._edit)
        bt_layout.addWidget(ok_bt)
        cancel_bt = QPushButton('Cancel')
        cancel_bt.clicked.connect(self.close)
        bt_layout.addWidget(cancel_bt)
        layout.addLayout(bt_layout)
        self.setLayout(layout)
        self.show()
    def _mode_changed(self, mode):
        """Update the prompt according to the chosen edit scope."""
        self.current_mode = mode
        if mode == 'all':
            curr_des = self.ad.description_cmbx.currentText()
        else:
            curr_des = self.mne.selected_region.description
        self.descr_label.setText(f'Change "{curr_des}" to:')
    def _edit(self):
        """Apply the entered description and close the dialog."""
        new_des = self.input_w.text()
        if new_des:
            if self.current_mode == 'all' or self.mne.selected_region is None:
                self.ad._edit_description_all(new_des)
            else:
                self.ad._edit_description_selected(new_des)
            self.close()
class AnnotationDock(QDockWidget):
    def __init__(self, main):
        """Create the annotation dock-widget attached to the main window."""
        super().__init__('Annotations')
        self.main = main
        self.mne = main.mne
        self._init_ui()
        # Dock is movable/floatable but not closable.
        self.setFeatures(QDockWidget.DockWidgetMovable |
                         QDockWidget.DockWidgetFloatable)
    def _init_ui(self):
        """Build the dock's widgets (description-chooser, buttons, spinboxes)."""
        widget = QWidget()
        layout = QHBoxLayout()
        layout.setAlignment(Qt.AlignLeft)
        self.description_cmbx = QComboBox()
        self.description_cmbx.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.description_cmbx.activated.connect(self._description_changed)
        self._update_description_cmbx()
        layout.addWidget(self.description_cmbx)
        add_bt = QPushButton('Add Description')
        add_bt.clicked.connect(self._add_description_dlg)
        layout.addWidget(add_bt)
        rm_bt = QPushButton('Remove Description')
        rm_bt.clicked.connect(self._remove_description_dlg)
        layout.addWidget(rm_bt)
        edit_bt = QPushButton('Edit Description')
        edit_bt.clicked.connect(self._edit_description_dlg)
        layout.addWidget(edit_bt)
        # Uncomment when custom colors for annotations are implemented in
        # MNE-Python.
        # color_bt = QPushButton('Edit Color')
        # color_bt.clicked.connect(self._set_color)
        # layout.addWidget(color_bt)
        select_bt = QPushButton('Select Visible')
        select_bt.clicked.connect(self._select_annotations)
        layout.addWidget(select_bt)
        # Determine reasonable time decimals from sampling frequency.
        time_decimals = int(np.ceil(np.log10(self.mne.info['sfreq'])))
        layout.addWidget(QLabel('Start:'))
        self.start_bx = QDoubleSpinBox()
        self.start_bx.setDecimals(time_decimals)
        self.start_bx.editingFinished.connect(self._start_changed)
        layout.addWidget(self.start_bx)
        layout.addWidget(QLabel('Stop:'))
        self.stop_bx = QDoubleSpinBox()
        self.stop_bx.setDecimals(time_decimals)
        self.stop_bx.editingFinished.connect(self._stop_changed)
        layout.addWidget(self.stop_bx)
        help_bt = QPushButton(QIcon(":/help.svg"), 'Help')
        help_bt.clicked.connect(self._show_help)
        layout.addWidget(help_bt)
        widget.setLayout(layout)
        self.setWidget(widget)
def _add_description_to_cmbx(self, description):
color_pixmap = QPixmap(25, 25)
color = _get_color(self.mne.annotation_segment_colors[description])
color.setAlpha(75)
color_pixmap.fill(color)
color_icon = QIcon(color_pixmap)
self.description_cmbx.addItem(color_icon, description)
    def _add_description(self, new_description):
        """Register a new description and make it the current one."""
        self.mne.new_annotation_labels.append(new_description)
        self.mne.visible_annotations[new_description] = True
        # Assign a color before the description appears in the combo-box.
        self.main._setup_annotation_colors()
        self._add_description_to_cmbx(new_description)
        self.mne.current_description = new_description
        self.description_cmbx.setCurrentText(new_description)
    def _add_description_dlg(self):
        """Ask the user for a new description and add it if it is new."""
        new_description, ok = QInputDialog.getText(self,
                                                   'Set new description!',
                                                   'New description: ')
        if ok and new_description \
                and new_description not in self.mne.new_annotation_labels:
            self._add_description(new_description)
    def _edit_description_all(self, new_des):
        """Rename every annotation with the current description to new_des."""
        old_des = self.description_cmbx.currentText()
        edit_regions = [r for r in self.mne.regions
                        if r.description == old_des]
        # Update regions & annotations
        for ed_region in edit_regions:
            idx = self.main._get_onset_idx(ed_region.getRegion()[0])
            self.mne.inst.annotations.description[idx] = new_des
            ed_region.update_description(new_des)
        # Update containers with annotation-attributes
        self.mne.new_annotation_labels.remove(old_des)
        self.mne.new_annotation_labels = self.main._get_annotation_labels()
        # Transfer visibility and color from the old to the new name.
        self.mne.visible_annotations[new_des] = \
            self.mne.visible_annotations.pop(old_des)
        self.mne.annotation_segment_colors[new_des] = \
            self.mne.annotation_segment_colors.pop(old_des)
        # Update related widgets
        self.main._setup_annotation_colors()
        self._update_regions_colors()
        self._update_description_cmbx()
        self.mne.overview_bar.update_annotations()
    def _edit_description_selected(self, new_des):
        """Rename only the currently selected annotation.

        Unlike :meth:`_edit_description_all` the old description may remain
        in use by other annotations; its bookkeeping entries are only
        removed once no annotation uses it anymore.
        """
        old_des = self.mne.selected_region.description
        idx = self.main._get_onset_idx(self.mne.selected_region.getRegion()[0])
        # Update regions & annotations
        self.mne.inst.annotations.description[idx] = new_des
        self.mne.selected_region.update_description(new_des)
        # Update containers with annotation-attributes
        if new_des not in self.mne.new_annotation_labels:
            self.mne.new_annotation_labels.append(new_des)
        # Copy visibility so both descriptions can be toggled independently.
        self.mne.visible_annotations[new_des] = \
            copy(self.mne.visible_annotations[old_des])
        # Drop the old description entirely if nothing uses it anymore.
        if old_des not in self.mne.inst.annotations.description:
            self.mne.new_annotation_labels.remove(old_des)
            self.mne.visible_annotations.pop(old_des)
            self.mne.annotation_segment_colors[new_des] = \
                self.mne.annotation_segment_colors.pop(old_des)
        # Update related widgets
        self.main._setup_annotation_colors()
        self._update_regions_colors()
        self._update_description_cmbx()
        self.mne.overview_bar.update_annotations()
def _edit_description_dlg(self):
if len(self.mne.inst.annotations.description) > 0:
_AnnotEditDialog(self)
else:
self.main.message_box(text='No Annotations!',
info_text='There are no annotations '
'yet to edit!',
icon=QMessageBox.Information)
    def _remove_description(self, rm_description):
        """Remove a description and every annotation that uses it."""
        # Remove regions
        for rm_region in [r for r in self.mne.regions
                          if r.description == rm_description]:
            rm_region.remove()
        # Remove from descriptions
        self.mne.new_annotation_labels.remove(rm_description)
        self._update_description_cmbx()
        # Remove from visible annotations
        self.mne.visible_annotations.pop(rm_description)
        # Remove from color-mapping
        if rm_description in self.mne.annotation_segment_colors:
            self.mne.annotation_segment_colors.pop(rm_description)
        # Set first description in Combo-Box to current description
        # (NOTE(review): if the combo-box became empty, current_description
        # keeps its stale value — confirm callers handle that case).
        if self.description_cmbx.count() > 0:
            self.description_cmbx.setCurrentIndex(0)
            self.mne.current_description = \
                self.description_cmbx.currentText()
def _remove_description_dlg(self):
rm_description = self.description_cmbx.currentText()
existing_annot = list(self.mne.inst.annotations.description).count(
rm_description)
if existing_annot > 0:
text = f'Remove annotations with {rm_description}?'
info_text = f'There exist {existing_annot} annotations with ' \
f'"{rm_description}".\n' \
f'Do you really want to remove them?'
buttons = QMessageBox.Yes | QMessageBox.No
ans = self.main.message_box(text=text, info_text=info_text,
buttons=buttons,
default_button=QMessageBox.Yes,
icon=QMessageBox.Question)
else:
ans = QMessageBox.Yes
if ans == QMessageBox.Yes:
self._remove_description(rm_description)
    def _select_annotations(self):
        """Open a modal dialog to choose which descriptions are visible."""
        def _set_visible_region(state, description):
            # Qt passes the check-state as int; store it as bool.
            self.mne.visible_annotations[description] = bool(state)

        def _select_all():
            # Show all descriptions.
            for chkbx in chkbxs:
                chkbx.setChecked(True)

        def _clear_all():
            # Hide all descriptions.
            for chkbx in chkbxs:
                chkbx.setChecked(False)

        select_dlg = QDialog(self)
        chkbxs = list()
        layout = QVBoxLayout()
        layout.addWidget(QLabel('Select visible labels:'))

        # Add descriptions to scroll-area to be scalable.
        scroll_area = QScrollArea()
        scroll_widget = QWidget()
        scroll_layout = QVBoxLayout()
        # One checkbox per description, pre-set to its current visibility.
        for des in self.mne.visible_annotations:
            chkbx = QCheckBox(des)
            chkbx.setChecked(self.mne.visible_annotations[des])
            chkbx.stateChanged.connect(partial(_set_visible_region,
                                               description=des))
            chkbxs.append(chkbx)
            scroll_layout.addWidget(chkbx)
        scroll_widget.setLayout(scroll_layout)
        scroll_area.setWidget(scroll_widget)
        layout.addWidget(scroll_area)

        bt_layout = QGridLayout()
        all_bt = QPushButton('All')
        all_bt.clicked.connect(_select_all)
        bt_layout.addWidget(all_bt, 0, 0)
        clear_bt = QPushButton('Clear')
        clear_bt.clicked.connect(_clear_all)
        bt_layout.addWidget(clear_bt, 0, 1)
        ok_bt = QPushButton('Ok')
        ok_bt.clicked.connect(select_dlg.close)
        bt_layout.addWidget(ok_bt, 1, 0, 1, 2)
        layout.addLayout(bt_layout)

        select_dlg.setLayout(layout)
        select_dlg.exec()

        # Apply the (possibly changed) visibility to the plot.
        self.main._update_regions_visible()
def _description_changed(self, descr_idx):
new_descr = self.description_cmbx.itemText(descr_idx)
self.mne.current_description = new_descr
def _start_changed(self):
start = self.start_bx.value()
sel_region = self.mne.selected_region
if sel_region:
stop = sel_region.getRegion()[1]
if start < stop:
sel_region.setRegion((start, stop))
else:
self.main.message_box(text='Invalid value!',
info_text='Start can\'t be bigger or '
'equal to Stop!',
icon=QMessageBox.Critical,
modal=False)
self.start_bx.setValue(sel_region.getRegion()[0])
def _stop_changed(self):
stop = self.stop_bx.value()
sel_region = self.mne.selected_region
if sel_region:
start = sel_region.getRegion()[0]
if start < stop:
sel_region.setRegion((start, stop))
else:
self.main.message_box(text='Invalid value!',
info_text='Stop can\'t be smaller or '
'equal to Start!',
icon=QMessageBox.Critical)
self.stop_bx.setValue(sel_region.getRegion()[1])
def _set_color(self):
curr_descr = self.description_cmbx.currentText()
if curr_descr in self.mne.annotation_segment_colors:
curr_col = self.mne.annotation_segment_colors[curr_descr]
else:
curr_col = None
color = QColorDialog.getColor(_get_color(curr_col), self,
f'Choose color for {curr_descr}!')
if color.isValid():
self.mne.annotation_segment_colors[curr_descr] = color
self._update_regions_colors()
self._update_description_cmbx()
self.mne.overview_bar.update_annotations()
def update_values(self, region):
rgn = region.getRegion()
self.start_bx.setValue(rgn[0])
self.stop_bx.setValue(rgn[1])
def _update_description_cmbx(self):
self.description_cmbx.clear()
descriptions = self.main._get_annotation_labels()
for description in descriptions:
self._add_description_to_cmbx(description)
self.description_cmbx.setCurrentText(self.mne.current_description)
def _update_regions_colors(self):
for region in self.mne.regions:
region.update_color()
def reset(self):
if self.description_cmbx.count() > 0:
self.description_cmbx.setCurrentIndex(0)
self.mne.current_description = self.description_cmbx.currentText()
self.start_bx.setValue(0)
self.stop_bx.setValue(0)
def _show_help(self):
info_text = '<h1>Help</h1>' \
'<h2>Annotations</h2>' \
'<h3>Add Annotations</h3>' \
'Drag inside the data-view to create annotations with '\
'the description currently selected (leftmost item of '\
'the toolbar).If there is no description yet, add one ' \
'with the button "Add description".' \
'<h3>Remove Annotations</h3>' \
'You can remove single annotations by right-clicking on '\
'them.' \
'<h3>Edit Annotations</h3>' \
'You can edit annotations by dragging them or their '\
'boundaries. Or you can use the dials in the toolbar to '\
'adjust the boundaries for the current selected '\
'annotation.' \
'<h2>Descriptions</h2>' \
'<h3>Add Description</h3>' \
'Add a new description with ' \
'the button "Add description".' \
'<h3>Edit Description</h3>' \
'You can edit the description of one single annotation '\
'or all annotations of the currently selected kind with '\
'the button "Edit description".' \
'<h3>Remove Description</h3>' \
'You can remove all annotations of the currently '\
'selected kind with the button "Remove description".'
self.main.message_box(text='Annotations-Help',
info_text=info_text,
icon=QMessageBox.Information)
class BrowserView(GraphicsView):
    """A GraphicsView wrapping the trace-plot of the browser."""

    def __init__(self, plot, **kwargs):
        super().__init__(**kwargs)
        self.setCentralItem(plot)
        # Accept touch-gestures on the viewport (handling is WIP, below).
        self.viewport().setAttribute(Qt.WA_AcceptTouchEvents, True)
        self.viewport().grabGesture(Qt.PinchGesture)
        self.viewport().grabGesture(Qt.SwipeGesture)

    # def viewportEvent(self, event):
    #     """Customize viewportEvent for touch-gestures (WIP)."""
    #     if event.type() in [QEvent.TouchBegin, QEvent.TouchUpdate,
    #                         QEvent.TouchEnd]:
    #         if event.touchPoints() == 2:
    #             pass
    #     elif event.type() == QEvent.Gesture:
    #         print('Gesture')
    #     return super().viewportEvent(event)

    def mouseMoveEvent(self, ev):
        """Forward mouse-moves via sigSceneMouseMoved.

        Don't set GraphicsView.mouseEnabled to True; the position is
        simply re-emitted so the browser can update its crosshair.
        """
        super().mouseMoveEvent(ev)
        self.sigSceneMouseMoved.emit(ev.pos())
class LoadThread(QThread):
    """Worker-thread that preloads and preprocesses the whole data at once.

    Progress and status are forwarded to the GUI-thread via signals;
    ``loadingFinished`` notifies the browser when precomputation is done.
    """
    loadProgress = pyqtSignal(int)
    processText = pyqtSignal(str)
    loadingFinished = pyqtSignal()

    def __init__(self, browser):
        super().__init__()
        self.browser = browser
        self.mne = browser.mne
        # Wire the thread-signals to the browser's GUI elements/slots.
        self.loadProgress.connect(self.mne.load_progressbar.setValue)
        self.processText.connect(self.browser._show_process)
        self.loadingFinished.connect(self.browser._precompute_finished)

    def run(self):
        """Load the data chunk-wise, process it and compute z-scores."""
        data = None
        if self.mne.is_epochs:
            # Epochs are concatenated along time, so build the full
            # time-vector over all epochs upfront.
            times = np.arange(len(self.mne.inst) * len(self.mne.inst.times)) \
                    / self.mne.info['sfreq']
        else:
            times = None
        # Load in up to 10 chunks so progress can be reported.
        n_chunks = min(10, len(self.mne.inst))
        chunk_size = len(self.mne.inst) // n_chunks
        for n in range(n_chunks):
            start = n * chunk_size
            if n == n_chunks - 1:
                # Last chunk takes the remainder as well.
                stop = None
            else:
                stop = start + chunk_size
            if self.mne.is_epochs:
                item = slice(start, stop)
                with self.mne.inst.info._unlock():
                    data_chunk = np.concatenate(
                        self.mne.inst.get_data(item=item), axis=-1)
            else:
                data_chunk, times_chunk = self.browser._load_data(start, stop)
                if times is None:
                    times = times_chunk
                else:
                    times = np.concatenate((times, times_chunk), axis=0)
            if data is None:
                data = data_chunk
            else:
                data = np.concatenate((data, data_chunk), axis=1)
            self.loadProgress.emit(n + 1)
        picks = self.mne.ch_order
        # Temporarily disable DC-removal while processing the full data.
        stashed_remove_dc = self.mne.remove_dc
        self.mne.remove_dc = False
        data = self.browser._process_data(data, 0, len(data), picks, self)
        self.mne.remove_dc = stashed_remove_dc
        self.mne.global_data = data
        self.mne.global_times = times
        # Calculate Z-Scores (used for the overview-bar's zscore-mode).
        self.processText.emit('Calculating Z-Scores...')
        self.browser._get_zscore(data)
        self.loadingFinished.emit()

    def clean(self):
        """Wait for the thread, disconnect signals and drop references."""
        if self.isRunning():
            wait_time = 10
            logger.info('Waiting for Loading-Thread to finish... '
                        f'(max. {wait_time} sec)')
            self.wait(int(wait_time * 1e3))
        _disconnect(self.loadProgress)
        _disconnect(self.processText)
        _disconnect(self.loadingFinished)
        # Break reference-cycles to the browser.
        del self.mne
        del self.browser
class _FastToolTipComboBox(QComboBox):
    """A QComboBox showing its tooltip immediately on hover.

    The default QToolTip only appears after a delay; here the tooltip text
    is stored locally and displayed as soon as the cursor enters the
    widget.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Default tooltip so enterEvent is safe even if setToolTip was
        # never called (the original raised AttributeError in that case).
        self.tooltip = ''
        self.setMouseTracking(True)

    def setToolTip(self, tooltip):
        """Store *tooltip* for instant display (overrides QWidget)."""
        self.tooltip = tooltip

    def enterEvent(self, event):
        """Show the stored tooltip right away at the cursor position."""
        QToolTip.showText(event.globalPos(), self.tooltip)
        super().enterEvent(event)
class _PGMetaClass(type(BrowserBase), type(QMainWindow)):
    """Combined metaclass of BrowserBase and QMainWindow.

    Both bases have different non-trivial metaclasses; inheriting from
    both without this shared metaclass would raise a metaclass-conflict
    TypeError.
    """
    pass
# Default values for parameters that are persisted via QSettings and
# loaded (possibly overridden) in PyQtGraphBrowser.__init__.
qsettings_params = {
    # Toggle antialiased drawing
    'antialiasing': False,
    # Steps per view (relative to time)
    'scroll_sensitivity': 100,
    # Downsampling-Factor (or 'auto', see SettingsDialog for details)
    'downsampling': 1,
    # Downsampling-Method (set SettingsDialog for details)
    'ds_method': 'peak'
}
def _disconnect(sig):
try:
sig.disconnect()
except TypeError: # if there are no connections, ignore it
pass
class PyQtGraphBrowser(BrowserBase, QMainWindow, metaclass=_PGMetaClass):
gotClosed = pyqtSignal()
def __init__(self, **kwargs):
self.backend_name = 'pyqtgraph'
BrowserBase.__init__(self, **kwargs)
QMainWindow.__init__(self)
# Add to list to keep a reference and avoid premature
# garbage-collection.
_browser_instances.append(self)
if self.mne.window_title is not None:
self.setWindowTitle(self.mne.window_title)
# Initialize attributes which are only used by pyqtgraph, not by
# matplotlib and add them to MNEBrowseParams.
# Exactly one MessageBox for messages to facilitate testing/debugging
self.msg_box = QMessageBox(self)
# MessageBox modality needs to be adapted for tests
# (otherwise test execution blocks)
self.test_mode = False
# A Settings-Dialog
self.mne.fig_settings = None
# Stores decimated data
self.mne.decim_data = None
self.mne.decim_times = None
# Stores ypos for selection-mode
self.mne.selection_ypos_dict = dict()
# Parameters for precomputing
self.mne.enable_precompute = False
self.mne.data_precomputed = False
self._rerun_load_thread = False
# Parameters for overviewbar
self.mne.show_overview_bar = True
self.mne.overview_mode = 'channels'
self.mne.zscore_rgba = None
# Container for traces
self.mne.traces = list()
# Scale-Factor
self.mne.scale_factor = 1
# Stores channel-types for butterfly-mode
self.mne.butterfly_type_order = [tp for tp in
_DATA_CH_TYPES_ORDER_DEFAULT
if tp in self.mne.ch_types]
if self.mne.is_epochs:
# Stores parameters for epochs
self.mne.epoch_dur = np.diff(self.mne.boundary_times[:2])[0]
epoch_idx = np.searchsorted(self.mne.midpoints,
(self.mne.t_start,
self.mne.t_start + self.mne.duration))
self.mne.epoch_idx = np.arange(epoch_idx[0], epoch_idx[1])
# Load from QSettings if available
for qparam in qsettings_params:
default = qsettings_params[qparam]
qvalue = QSettings().value(qparam, defaultValue=default)
# QSettings may alter types depending on OS
if not isinstance(qvalue, type(default)):
try:
qvalue = literal_eval(qvalue)
except (SyntaxError, ValueError):
if qvalue in ['true', 'false']:
qvalue = bool(qvalue)
else:
qvalue = default
setattr(self.mne, qparam, qvalue)
# Initialize channel-colors for faster indexing later
self.mne.ch_color_ref = dict()
for idx, ch_name in enumerate(self.mne.ch_names):
ch_type = self.mne.ch_types[idx]
self.mne.ch_color_ref[ch_name] = self.mne.ch_color_dict[ch_type]
# Initialize epoch colors for faster indexing later
if self.mne.is_epochs:
if self.mne.epoch_colors is None:
self.mne.epoch_color_ref = \
np.repeat([to_rgba_array(c) for c
in self.mne.ch_color_ref.values()],
len(self.mne.inst), axis=1)
else:
self.mne.epoch_color_ref = np.empty((len(self.mne.ch_names),
len(self.mne.inst), 4))
for epo_idx, epo in enumerate(self.mne.epoch_colors):
for ch_idx, color in enumerate(epo):
self.mne.epoch_color_ref[ch_idx, epo_idx] = \
to_rgba_array(color)
# Mark bad epochs
self.mne.epoch_color_ref[:, self.mne.bad_epochs] = \
to_rgba_array(self.mne.epoch_color_bad)
# Mark bad channels
bad_idxs = np.in1d(self.mne.ch_names, self.mne.info['bads'])
self.mne.epoch_color_ref[bad_idxs, :] = \
to_rgba_array(self.mne.ch_color_bad)
# Add Load-Progressbar for loading in a thread
self.mne.load_prog_label = QLabel('Loading...')
self.statusBar().addWidget(self.mne.load_prog_label)
self.mne.load_prog_label.hide()
self.mne.load_progressbar = QProgressBar()
# Set to n_chunks of LoadRunner
self.mne.load_progressbar.setMaximum(10)
self.statusBar().addWidget(self.mne.load_progressbar, stretch=1)
self.mne.load_progressbar.hide()
# A QThread for preloading
self.load_thread = LoadThread(self)
# Create centralWidget and layout
widget = QWidget()
layout = QGridLayout()
# Initialize Axis-Items
self.mne.time_axis = TimeAxis(self.mne)
self.mne.time_axis.setLabel(text='Time', units='s')
self.mne.channel_axis = ChannelAxis(self)
self.mne.viewbox = RawViewBox(self)
# Start precomputing if enabled
self._init_precompute()
# Initialize data (needed in DataTrace.update_data).
self._update_data()
# Initialize Trace-Plot
self.mne.plt = PlotItem(viewBox=self.mne.viewbox,
axisItems={'bottom': self.mne.time_axis,
'left': self.mne.channel_axis})
# Hide AutoRange-Button
self.mne.plt.hideButtons()
# Configure XY-Range
if self.mne.is_epochs:
self.mne.xmax = len(self.mne.inst.times) * len(self.mne.inst) \
/ self.mne.info['sfreq']
else:
self.mne.xmax = self.mne.inst.times[-1]
# Add one empty line as padding at top (y=0).
# Negative Y-Axis to display channels from top.
self.mne.ymax = len(self.mne.ch_order) + 1
self.mne.plt.setLimits(xMin=0, xMax=self.mne.xmax,
yMin=0, yMax=self.mne.ymax)
# Connect Signals from PlotItem
self.mne.plt.sigXRangeChanged.connect(self._xrange_changed)
self.mne.plt.sigYRangeChanged.connect(self._yrange_changed)
# Add traces
for ch_idx in self.mne.picks:
DataTrace(self, ch_idx)
# Initialize Epochs Grid
if self.mne.is_epochs:
grid_pen = mkPen(color='k', width=2, style=Qt.DashLine)
for x_grid in self.mne.boundary_times[1:-1]:
grid_line = InfiniteLine(pos=x_grid,
pen=grid_pen,
movable=False)
self.mne.plt.addItem(grid_line)
# Add events
if getattr(self.mne, 'event_nums', None) is not None:
self.mne.events_visible = True
for ev_time, ev_id in zip(self.mne.event_times,
self.mne.event_nums):
color = self.mne.event_color_dict[ev_id]
event_line = EventLine(ev_time, ev_id, color)
self.mne.event_lines.append(event_line)
if 0 < ev_time < self.mne.duration:
self.mne.plt.addItem(event_line)
else:
self.mne.events_visible = False
# Add Scale-Bars
self._add_scalebars()
# Check for OpenGL
if self.mne.use_opengl is None: # default: opt-in
self.mne.use_opengl = (
get_config('MNE_BROWSE_USE_OPENGL', '').lower() == 'true')
# Epochs currently only work with OpenGL enabled
# (https://github.com/mne-tools/mne-qt-browser/issues/53)
mac_epochs = self.mne.is_epochs and sys.platform == 'darwin'
if mac_epochs:
self.mne.use_opengl = True
if self.mne.use_opengl:
try:
import OpenGL
except (ModuleNotFoundError, ImportError):
warn('PyOpenGL was not found and OpenGL can\'t be used!\n'
'Consider installing pyopengl with pip or conda'
'or set "use_opengl" to False to avoid this warning.')
if mac_epochs:
warn('Plotting epochs on MacOS without OpenGL'
'may be unstable!')
self.mne.use_opengl = False
else:
logger.info(
f'Using pyopengl with version {OpenGL.__version__}')
self.mne.view = BrowserView(self.mne.plt,
useOpenGL=self.mne.use_opengl,
background='w')
if hasattr(self.mne, 'bgcolor'):
bgcolor = self.mne.bgcolor
else:
bgcolor = 'w'
self.mne.view.setBackground(_get_color(bgcolor))
layout.addWidget(self.mne.view, 0, 0)
self.mne.ax_hscroll = TimeScrollBar(self.mne)
layout.addWidget(self.mne.ax_hscroll, 1, 0, 1, 2)
self.mne.ax_vscroll = ChannelScrollBar(self.mne)
layout.addWidget(self.mne.ax_vscroll, 0, 1)
self.mne.vline = None
self.mne.vline_visible = False
self.mne.crosshair_enabled = False
self.mne.crosshair_h = None
self.mne.crosshair = None
self.mne.view.sigSceneMouseMoved.connect(self._mouse_moved)
self.mne.annotation_mode = False
if not self.mne.is_epochs:
self._init_annot_mode()
self.mne.overview_bar = OverviewBar(self)
layout.addWidget(self.mne.overview_bar, 2, 0, 1, 2)
self.overview_mode_chkbx = _FastToolTipComboBox()
self.overview_mode_chkbx.addItems(['empty', 'channels'])
tooltip = (
'<h2>Overview-Modes</h2>'
'<ul>'
'<li>empty:<br>'
'Display no background.</li>'
'<li>channels:<br>'
'Display each channel with its channel-type color.</li>'
'<li>zscore:<br>'
'Display the zscore for the data from each channel across time. '
'Red indicates high zscores, blue indicates low zscores, '
'and the boundaries of the color gradient are defined by the '
'minimum/maximum zscore.'
'This only works if precompute is set to "True", or if it is '
'enabled with "auto" and enough free RAM is available.</li>'
'</ul>')
self.overview_mode_chkbx.setToolTip(tooltip)
if self.mne.enable_precompute:
self.overview_mode_chkbx.addItems(['zscore'])
self.overview_mode_chkbx.setCurrentText(self.mne.overview_mode)
self.overview_mode_chkbx.currentTextChanged.connect(
self._overview_mode_changed)
self.overview_mode_chkbx.setFocusPolicy(Qt.NoFocus)
overview_mode_layout = QHBoxLayout()
overview_mode_layout.addWidget(QLabel('Overview-Mode:'))
overview_mode_layout.addWidget(self.overview_mode_chkbx)
overview_mode_widget = QWidget()
overview_mode_widget.setLayout(overview_mode_layout)
self.statusBar().addPermanentWidget(overview_mode_widget)
widget.setLayout(layout)
self.setCentralWidget(widget)
if getattr(self.mne, 'group_by', None) in ['position', 'selection']:
self._create_selection_fig()
if getattr(self.mne, 'show_options', False):
self._toggle_proj_fig()
self.mne.toolbar = self.addToolBar('Tools')
self.mne.toolbar.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
adecr_time = QAction(QIcon(":/less_time.svg"), '- Time', parent=self)
adecr_time.triggered.connect(partial(self.change_duration, -0.2))
self.mne.toolbar.addAction(adecr_time)
aincr_time = QAction(QIcon(":/more_time.svg"), '+ Time', parent=self)
aincr_time.triggered.connect(partial(self.change_duration, 0.25))
self.mne.toolbar.addAction(aincr_time)
adecr_nchan = QAction(QIcon(":/less_channels.svg"), '- Channels',
parent=self)
adecr_nchan.triggered.connect(partial(self.change_nchan, -10))
self.mne.toolbar.addAction(adecr_nchan)
aincr_nchan = QAction(QIcon(":/more_channels.svg"), '+ Channels',
parent=self)
aincr_nchan.triggered.connect(partial(self.change_nchan, 10))
self.mne.toolbar.addAction(aincr_nchan)
adecr_nchan = QAction(QIcon(":/zoom_out.svg"), 'Zoom Out', parent=self)
adecr_nchan.triggered.connect(partial(self.scale_all, 4 / 5))
self.mne.toolbar.addAction(adecr_nchan)
aincr_nchan = QAction(QIcon(":/zoom_in.svg"), 'Zoom In', parent=self)
aincr_nchan.triggered.connect(partial(self.scale_all, 5 / 4))
self.mne.toolbar.addAction(aincr_nchan)
if not self.mne.is_epochs:
atoggle_annot = QAction(QIcon(":/annotations.svg"), 'Annotations',
parent=self)
atoggle_annot.triggered.connect(self._toggle_annotation_fig)
self.mne.toolbar.addAction(atoggle_annot)
atoggle_proj = QAction(QIcon(":/ssp.svg"), 'SSP', parent=self)
atoggle_proj.triggered.connect(self._toggle_proj_fig)
self.mne.toolbar.addAction(atoggle_proj)
atoggle_fullscreen = QAction(QIcon(":/fullscreen.svg"), 'Fullscreen',
parent=self)
atoggle_fullscreen.triggered.connect(self._toggle_fullscreen)
self.mne.toolbar.addAction(atoggle_fullscreen)
asettings = QAction(QIcon(":/settings.svg"), 'Settings',
parent=self)
asettings.triggered.connect(self._toggle_settings_fig)
self.mne.toolbar.addAction(asettings)
ahelp = QAction(QIcon(":/help.svg"), 'Help', parent=self)
ahelp.triggered.connect(self._toggle_help_fig)
self.mne.toolbar.addAction(ahelp)
self.mne.plt.setXRange(self.mne.t_start,
self.mne.t_start + self.mne.duration,
padding=0)
if self.mne.butterfly:
self._set_butterfly(True)
else:
self.mne.plt.setYRange(0, self.mne.n_channels + 1, padding=0)
width = int(self.mne.figsize[0] * self.logicalDpiX())
height = int(self.mne.figsize[1] * self.logicalDpiY())
self.resize(width, height)
is_mac = platform.system() == 'Darwin'
dur_keys = ('fn + ←', 'fn + →') if is_mac else ('Home', 'End')
ch_keys = ('fn + ↑', 'fn + ↓') if is_mac else ('Page up', 'Page down')
hscroll_type = '1 epoch' if self.mne.is_epochs else '¼ page'
self.mne.keyboard_shortcuts = {
'left': {
'alias': '←',
'qt_key': Qt.Key_Left,
'modifier': [None, 'Shift'],
'slot': [self.hscroll],
'parameter': [-40, '-full'],
'description': [f'Scroll left ({hscroll_type})',
'Scroll left (full page)']
},
'right': {
'alias': '→',
'qt_key': Qt.Key_Right,
'modifier': [None, 'Shift'],
'slot': [self.hscroll],
'parameter': [40, '+full'],
'description': [f'Scroll right ({hscroll_type})',
'Scroll right (full page)']
},
'up': {
'alias': '↑',
'qt_key': Qt.Key_Up,
'slot': [self.vscroll],
'parameter': ['-full'],
'description': ['Scroll up (full page)']
},
'down': {
'alias': '↓',
'qt_key': Qt.Key_Down,
'slot': [self.vscroll],
'parameter': ['+full'],
'description': ['Scroll down (full page)']
},
'home': {
'alias': dur_keys[0],
'qt_key': Qt.Key_Home,
'slot': [self.change_duration],
'parameter': [-0.2],
'description': [f'Decrease duration ({hscroll_type})']
},
'end': {
'alias': dur_keys[1],
'qt_key': Qt.Key_End,
'slot': [self.change_duration],
'parameter': [0.25],
'description': [f'Increase duration ({hscroll_type})']
},
'pagedown': {
'alias': ch_keys[0],
'qt_key': Qt.Key_PageDown,
'modifier': [None, 'Shift'],
'slot': [self.change_nchan],
'parameter': [-1, -10],
'description': ['Decrease shown channels (1)',
'Decrease shown channels (10)']
},
'pageup': {
'alias': ch_keys[1],
'qt_key': Qt.Key_PageUp,
'modifier': [None, 'Shift'],
'slot': [self.change_nchan],
'parameter': [1, 10],
'description': ['Increase shown channels (1)',
'Increase shown channels (10)']
},
'-': {
'qt_key': Qt.Key_Minus,
'slot': [self.scale_all],
'parameter': [4 / 5],
'description': ['Decrease Scale']
},
'+': {
'qt_key': Qt.Key_Plus,
'slot': [self.scale_all],
'parameter': [5 / 4],
'description': ['Increase Scale']
},
'=': {
'qt_key': Qt.Key_Equal,
'slot': [self.scale_all],
'parameter': [5 / 4],
'description': ['Increase Scale']
},
'a': {
'qt_key': Qt.Key_A,
'slot': [self._toggle_annotation_fig,
self._toggle_annotations],
'modifier': [None, 'Shift'],
'description': ['Toggle Annotation-Tool',
'Toggle Annotations visible']
},
'b': {
'qt_key': Qt.Key_B,
'slot': [self._toggle_butterfly],
'description': ['Toggle Butterfly']
},
'd': {
'qt_key': Qt.Key_D,
'slot': [self._toggle_dc],
'description': ['Toggle DC-Correction']
},
'e': {
'qt_key': Qt.Key_E,
'slot': [self._toggle_events],
'description': ['Toggle Events visible']
},
'h': {
'qt_key': Qt.Key_H,
'slot': [self._toggle_epoch_histogram],
'description': ['Toggle Epoch-Histogram']
},
'j': {
'qt_key': Qt.Key_J,
'slot': [self._toggle_proj_fig,
self._toggle_all_projs],
'modifier': [None, 'Shift'],
'description': ['Toggle Projection Figure',
'Toggle all projections']
},
'l': {
'qt_key': Qt.Key_L,
'slot': [self._toggle_antialiasing],
'description': ['Toggle Antialiasing']
},
'o': {
'qt_key': Qt.Key_O,
'slot': [self._toggle_overview_bar],
'description': ['Toggle Overview-Bar']
},
't': {
'qt_key': Qt.Key_T,
'slot': [self._toggle_time_format],
'description': ['Toggle Time-Format']
},
's': {
'qt_key': Qt.Key_S,
'slot': [self._toggle_scalebars],
'description': ['Toggle Scalebars']
},
'w': {
'qt_key': Qt.Key_W,
'slot': [self._toggle_whitening],
'description': ['Toggle Whitening']
},
'x': {
'qt_key': Qt.Key_X,
'slot': [self._toggle_crosshair],
'description': ['Toggle Crosshair']
},
'z': {
'qt_key': Qt.Key_Z,
'slot': [self._toggle_zenmode],
'description': ['Toggle Zen-Mode']
},
'?': {
'qt_key': Qt.Key_Question,
'slot': [self._toggle_help_fig],
'description': ['Show Help']
},
'f11': {
'qt_key': Qt.Key_F11,
'slot': [self._toggle_fullscreen],
'description': ['Toggle Full-Screen']
},
'escape': {
'qt_key': Qt.Key_Escape,
'slot': [self.close],
'description': ['Close']
},
'enter': {
'qt_key': Qt.Key_Enter
},
' ': {
'qt_key': Qt.Key_Space
}
}
    def _update_yaxis_labels(self):
        """Repaint the channel-axis so the y-axis labels refresh."""
        self.mne.channel_axis.repaint()
def _add_scalebars(self):
self.mne.scalebars.clear()
ordered_types = self.mne.ch_types[self.mne.ch_order]
unique_type_idxs = np.unique(ordered_types,
return_index=True)[1]
ch_types_ordered = [ordered_types[idx] for idx
in sorted(unique_type_idxs)]
for ch_type in [ct for ct in ch_types_ordered
if ct != 'stim' and
ct in self.mne.scalings and
ct in getattr(self.mne, 'units', {}) and
ct in getattr(self.mne, 'unit_scalings', {})]:
scale_bar = ScaleBar(self.mne, ch_type)
self.mne.scalebars[ch_type] = scale_bar
self.mne.plt.addItem(scale_bar)
scale_bar_text = ScaleBarText(self.mne, ch_type)
self.mne.scalebar_texts[ch_type] = scale_bar_text
self.mne.plt.addItem(scale_bar_text)
self._set_scalebars_visible(self.mne.scalebars_visible)
def _update_scalebar_x_positions(self):
if self.mne.scalebars_visible:
for scalebar in self.mne.scalebars.values():
scalebar.update_x_position()
for scalebar_text in self.mne.scalebar_texts.values():
scalebar_text.update_x_position()
def _update_scalebar_y_positions(self):
if self.mne.scalebars_visible:
for scalebar in self.mne.scalebars.values():
scalebar.update_y_position()
for scalebar_text in self.mne.scalebar_texts.values():
scalebar_text.update_y_position()
def _update_scalebar_values(self):
for scalebar_text in self.mne.scalebar_texts.values():
scalebar_text.update_value()
def _set_scalebars_visible(self, visible):
for scalebar in self.mne.scalebars.values():
scalebar.setVisible(visible)
for scalebar_text in self.mne.scalebar_texts.values():
scalebar_text.setVisible(visible)
self._update_scalebar_y_positions()
def _toggle_scalebars(self):
self.mne.scalebars_visible = not self.mne.scalebars_visible
self._set_scalebars_visible(self.mne.scalebars_visible)
    def _overview_mode_changed(self, new_mode):
        """Apply a new overview-bar mode chosen in the combo-box."""
        self.mne.overview_mode = new_mode
        if self.mne.overview_mode == 'zscore':
            # Wait until the zscores are precomputed by the load-thread
            # ('zscore' is only offered when precompute is enabled, see
            # __init__). NOTE(review): this busy-waits on the Qt
            # event-loop and would spin forever if zscore_rgba is never
            # set — confirm the load-thread cannot fail silently here.
            while self.mne.zscore_rgba is None:
                QApplication.processEvents()
        self.mne.overview_bar.set_background()
def scale_all(self, step):
self.mne.scale_factor *= step
if self.mne.clipping is not None:
self._update_data()
for line in self.mne.traces:
line.update_scale()
self._update_scalebar_values()
def hscroll(self, step):
if step == '+full':
rel_step = self.mne.duration
elif step == '-full':
rel_step = - self.mne.duration
elif self.mne.is_epochs:
direction = 1 if step > 0 else -1
rel_step = direction * self.mne.duration / self.mne.n_epochs
else:
rel_step = step * self.mne.duration / self.mne.scroll_sensitivity
xmin, xmax = [i + rel_step for i in self.mne.viewbox.viewRange()[0]]
if xmin < 0:
xmin = 0
xmax = xmin + self.mne.duration
elif xmax > self.mne.xmax:
xmax = self.mne.xmax
xmin = xmax - self.mne.duration
self.mne.plt.setXRange(xmin, xmax, padding=0)
def vscroll(self, step):
if self.mne.fig_selection is not None:
if step == '+full':
step = 1
elif step == '-full':
step = -1
else:
step = int(step)
self.mne.fig_selection._scroll_selection(step)
elif self.mne.butterfly:
return
else:
if step == '+full':
step = self.mne.n_channels
elif step == '-full':
step = - self.mne.n_channels
ymin, ymax = [i + step for i in self.mne.viewbox.viewRange()[1]]
if ymin < 0:
ymin = 0
ymax = self.mne.n_channels + 1
elif ymax > self.mne.ymax:
ymax = self.mne.ymax
ymin = ymax - self.mne.n_channels - 1
self.mne.plt.setYRange(ymin, ymax, padding=0)
def change_duration(self, step):
xmin, xmax = self.mne.viewbox.viewRange()[0]
if self.mne.is_epochs:
min_dur = len(self.mne.inst.times) / self.mne.info['sfreq']
step_dir = (1 if step > 0 else -1)
rel_step = min_dur * step_dir
self.mne.n_epochs = np.clip(self.mne.n_epochs + step_dir,
1, len(self.mne.inst))
else:
min_dur = 3 * np.diff(self.mne.inst.times[:2])[0]
rel_step = self.mne.duration * step
xmax += rel_step
if xmax - xmin < min_dur:
xmax = xmin + min_dur
if xmax > self.mne.xmax:
diff = xmax - self.mne.xmax
xmax = self.mne.xmax
xmin -= diff
if xmin < 0:
xmin = 0
self.mne.ax_hscroll.update_duration()
self.mne.plt.setXRange(xmin, xmax, padding=0)
def change_nchan(self, step):
if not self.mne.butterfly:
if step == '+full':
step = self.mne.n_channels
elif step == '-full':
step = - self.mne.n_channels
ymin, ymax = self.mne.viewbox.viewRange()[1]
ymax += step
if ymax > self.mne.ymax:
ymax = self.mne.ymax
ymin -= step
if ymin < 0:
ymin = 0
if ymax - ymin <= 2:
ymax = ymin + 2
self.mne.ax_vscroll.update_nchan()
self.mne.plt.setYRange(ymin, ymax, padding=0)
def _remove_vline(self):
if self.mne.vline is not None:
if self.mne.is_epochs:
for vline in self.mne.vline:
self.mne.plt.removeItem(vline)
else:
self.mne.plt.removeItem(self.mne.vline)
self.mne.vline = None
self.mne.vline_visible = False
self.mne.overview_bar.update_vline()
def _get_vline_times(self, t):
rel_time = t % self.mne.epoch_dur
abs_time = self.mne.times[0]
ts = np.arange(
self.mne.n_epochs) * self.mne.epoch_dur + abs_time + rel_time
return ts
def _vline_slot(self, orig_vline):
if self.mne.is_epochs:
ts = self._get_vline_times(orig_vline.value())
for vl, xt in zip(self.mne.vline, ts):
if vl != orig_vline:
vl.setPos(xt)
self.mne.overview_bar.update_vline()
    def _add_vline(self, t):
        """Add (or move) the time-marker line at time *t*.

        For epochs, one line per visible epoch is created, each clipped
        to its epoch's boundaries.
        """
        if self.mne.is_epochs:
            ts = self._get_vline_times(t)
            if self.mne.vline is None:
                # Create one bounded line per visible epoch.
                self.mne.vline = list()
                for xt in ts:
                    epo_idx = np.clip(
                        np.searchsorted(self.mne.boundary_times, xt) - 1,
                        0, len(self.mne.inst))
                    bmin, bmax = self.mne.boundary_times[epo_idx:epo_idx + 2]
                    # Shrink upper bound by one sample to avoid
                    # overlapping the next epoch.
                    bmax -= 1 / self.mne.info['sfreq']
                    vl = VLine(self.mne, xt, bounds=(bmin, bmax))
                    # Only emitted when dragged by the user.
                    vl.sigPositionChangeFinished.connect(self._vline_slot)
                    self.mne.vline.append(vl)
                    self.mne.plt.addItem(vl)
            else:
                # Lines already exist, just move them.
                for vl, xt in zip(self.mne.vline, ts):
                    vl.setPos(xt)
        else:
            if self.mne.vline is None:
                self.mne.vline = VLine(self.mne, t, bounds=(0, self.mne.xmax))
                self.mne.vline.sigPositionChangeFinished.connect(
                    self._vline_slot)
                self.mne.plt.addItem(self.mne.vline)
            else:
                self.mne.vline.setPos(t)
        self.mne.vline_visible = True
        self.mne.overview_bar.update_vline()
    def _mouse_moved(self, pos):
        """Update crosshair and status-bar readout at the cursor position."""
        if self.mne.crosshair_enabled:
            # Map scene-coordinates into data-coordinates.
            if self.mne.plt.sceneBoundingRect().contains(pos):
                mousePoint = self.mne.viewbox.mapSceneToView(pos)
                x, y = mousePoint.x(), mousePoint.y()
                if (0 <= x <= self.mne.xmax and
                        0 <= y <= self.mne.ymax):
                    # Lazily create the crosshair-item.
                    if not self.mne.crosshair:
                        self.mne.crosshair = Crosshair()
                        self.mne.plt.addItem(self.mne.crosshair,
                                             ignoreBounds=True)
                    # Pick the (single) trace within ±0.5 of the cursor's y.
                    trace = [tr for tr in self.mne.traces if
                             tr.ypos - 0.5 < y < tr.ypos + 0.5]
                    if len(trace) == 1:
                        trace = trace[0]
                        idx = np.searchsorted(self.mne.times, x)
                        # Precomputed data is ordered differently.
                        if self.mne.data_precomputed:
                            data = self.mne.data[trace.order_idx]
                        else:
                            data = self.mne.data[trace.range_idx]
                        yvalue = data[idx]
                        yshown = yvalue + trace.ypos
                        self.mne.crosshair.set_data(x, yshown)
                        # For epochs, report time relative to epoch-start.
                        if self.mne.is_epochs:
                            rel_idx = idx % len(self.mne.inst.times)
                            x = self.mne.inst.times[rel_idx]
                        # NOTE(review): scaler presumably undoes the
                        # display-scaling/inverted y-axis (-1 in butterfly,
                        # -2 otherwise) — confirm against DataTrace.
                        scaler = -1 if self.mne.butterfly else -2
                        inv_norm = (scaler *
                                    self.mne.scalings[trace.ch_type] *
                                    self.mne.unit_scalings[trace.ch_type] /
                                    self.mne.scale_factor)
                        label = f'{_simplify_float(yvalue * inv_norm)} ' \
                                f'{self.mne.units[trace.ch_type]}'
                        self.statusBar().showMessage(f'x={x:.3f} s, '
                                                     f'y={label}')
def _toggle_crosshair(self):
    """Enable/disable crosshair mode, removing any displayed crosshair."""
    self.mne.crosshair_enabled = not self.mne.crosshair_enabled
    crosshair = self.mne.crosshair
    if crosshair:
        self.mne.plt.removeItem(crosshair)
        self.mne.crosshair = None
def _xrange_changed(self, _, xrange):
    """React to a change of the visible time range.

    Updates shown epochs, vline bounds, annotations, events and the
    scroll/overview widgets, then redraws with freshly loaded data.
    """
    if self.mne.is_epochs:
        if self.mne.vline is not None:
            # Remember the vline time relative to its epoch start so it
            # can be restored after the visible epochs change.
            rel_vl_t = self.mne.vline[0].value() \
                       - self.mne.boundary_times[self.mne.epoch_idx][0]

        # Select the epochs whose midpoint falls into the new range.
        boundary_idxs = np.searchsorted(self.mne.midpoints, xrange)
        self.mne.epoch_idx = np.arange(*boundary_idxs)

        for trace in self.mne.traces:
            trace.update_color()

        if self.mne.vline is not None:
            for bmin, bmax, vl in zip(self.mne.boundary_times[
                                          self.mne.epoch_idx],
                                      self.mne.boundary_times[
                                          self.mne.epoch_idx + 1],
                                      self.mne.vline):
                # Exclude the first sample of the next epoch.
                bmax -= 1 / self.mne.info['sfreq']
                vl.setBounds((bmin, bmax))
                vl.setValue(bmin + rel_vl_t)

    self.mne.t_start = xrange[0]
    self.mne.duration = xrange[1] - xrange[0]

    self._redraw(update_data=True)

    # Annotations are only shown for non-epochs data.
    if not self.mne.is_epochs:
        self._update_annotations_xrange(xrange)
    self._update_events_xrange(xrange)
    self.mne.ax_hscroll.update_value(xrange[0])
    self.mne.overview_bar.update_viewrange()
    self._update_scalebar_x_positions()
def _update_events_xrange(self, xrange):
    """Keep only the event lines inside *xrange* on the plot."""
    if not self.mne.events_visible:
        return
    xmin, xmax = xrange
    for ev_line in self.mne.event_lines:
        inside = xmin < ev_line.pos().x() < xmax
        shown = ev_line in self.mne.plt.items
        if inside and not shown:
            self.mne.plt.addItem(ev_line)
        elif not inside and shown:
            self.mne.plt.removeItem(ev_line)
def _update_annotations_xrange(self, xrange):
    """Add annotation regions overlapping *xrange*, remove the rest.

    Only visible descriptions are considered; off-screen regions are
    removed to keep the number of Qt items small.
    """
    if self.mne.annotations_visible:
        for region in self.mne.regions:
            if self.mne.visible_annotations[region.description]:
                rmin, rmax = region.getRegion()
                xmin, xmax = xrange
                # All four True  -> region entirely left of the view.
                # All four False -> region entirely right of the view.
                # Mixed          -> region overlaps the view.
                comparisons = [rmin < xmin,
                               rmin < xmax,
                               rmax < xmin,
                               rmax < xmax]
                if all(comparisons) or not any(comparisons):
                    if region in self.mne.plt.items:
                        self.mne.plt.removeItem(region)
                        self.mne.plt.removeItem(region.label_item)
                else:
                    if region not in self.mne.plt.items:
                        self.mne.plt.addItem(region)
                        self.mne.plt.addItem(region.label_item)
def _yrange_changed(self, _, yrange):
    """React to a change of the visible channel (y) range.

    Updates picks and data, then recycles existing trace items:
    traces scrolled out of view are re-assigned to newly visible
    channels instead of being destroyed and recreated.
    """
    if not self.mne.butterfly:
        if not self.mne.fig_selection:
            # yrange[0] is the first shown channel (clipped to valid).
            self.mne.ch_start = np.clip(round(yrange[0]), 0,
                                        len(self.mne.ch_order)
                                        - self.mne.n_channels)
            self.mne.n_channels = round(yrange[1] - yrange[0] - 1)
            self._update_picks()
            self.mne.ax_vscroll.update_value(self.mne.ch_start)
        self._update_data()

    self.mne.overview_bar.update_viewrange()

    self._update_scalebar_y_positions()

    # Traces whose channel is no longer picked ...
    off_traces = [tr for tr in self.mne.traces
                  if tr.ch_idx not in self.mne.picks]
    # ... and picks that do not have a trace yet.
    add_idxs = [p for p in self.mne.picks
                if p not in [tr.ch_idx for tr in self.mne.traces]]

    # Keep range_idx of remaining traces up to date.
    for trace in [tr for tr in self.mne.traces if tr not in off_traces]:
        trace.update_range_idx()

    trace_diff = len(self.mne.picks) - len(self.mne.traces)
    # Remove superfluous traces.
    if trace_diff < 0:
        remove_traces = off_traces[:abs(trace_diff)]
        for trace in remove_traces:
            trace.remove()
            off_traces.remove(trace)
    # Create additional traces if necessary.
    if trace_diff > 0:
        idxs_copy = add_idxs.copy()
        for aidx in idxs_copy[:trace_diff]:
            DataTrace(self, aidx)
            add_idxs.remove(aidx)

    # Re-assign the remaining off-screen traces to new channels.
    for trace, ch_idx in zip(off_traces, add_idxs):
        trace.set_ch_idx(ch_idx)
        trace.update_color()
        trace.update_data()
def _init_precompute(self):
    """Start (re)computation of the fully processed data in a thread.

    Fix: the ``def`` line of this method was garbled in the source
    ("ute(self):"); the name is reconstructed from the call in
    ``_rerun_precompute``.
    """
    # Drop previously precomputed data and free the memory.
    self.mne.data_precomputed = False
    if all([hasattr(self.mne, st)
            for st in ['global_data', 'global_times']]):
        del self.mne.global_data, self.mne.global_times
    gc.collect()

    # 'auto' checks free RAM; an explicit bool overrides.
    if self.mne.precompute == 'auto':
        self.mne.enable_precompute = self._check_space_for_precompute()
    elif isinstance(self.mne.precompute, bool):
        self.mne.enable_precompute = self.mne.precompute

    if self.mne.enable_precompute:
        # Show precomputation progress in the UI and start loading.
        self.mne.load_progressbar.show()
        self.mne.load_prog_label.show()
        self.load_thread.start()
def _rerun_precompute(self):
    """Re-run precomputation, deferring it while the loader is busy."""
    if not self.load_thread.isRunning():
        self._init_precompute()
    else:
        # The running thread will pick this flag up and restart.
        self._rerun_load_thread = True
def _check_space_for_precompute(self):
    """Return True if the precomputed data is expected to fit into RAM.

    Estimates the in-memory size of the fully loaded, processed data
    and compares it with the currently free RAM (via psutil).
    Returns False when psutil is not installed.
    """
    try:
        import psutil
    except ImportError:
        # Fix: added the space before "psutil" that was lost in the
        # implicit string concatenation of the original message.
        logger.info('Free RAM space could not be determined because '
                    '"psutil" is not installed. '
                    'Setting precompute to False.')
        return False
    else:
        if self.mne.is_epochs:
            files = [self.mne.inst.filename]
        else:
            files = self.mne.inst.filenames
        if files[0] is not None:
            # Sum the on-disk size of the raw file(s).
            disk_space = 0
            for fn in files:
                disk_space += getsize(fn)

            # Scale factor from on-disk sample format to 64-bit float.
            fmt_multipliers = {'double': 1,
                               'single': 2,
                               'int': 2,
                               'short': 4}
            # on disk
            fmt = getattr(self.mne.inst, 'orig_format', 'single')
            # Apply size change to 64-bit float in memory
            # (* 2 because when loading data will be loaded into a copy
            # of self.mne.inst._data to apply processing.
            expected_ram = disk_space * fmt_multipliers[fmt] * 2
        else:
            expected_ram = sys.getsizeof(self.mne.inst._data)

        # Get available RAM
        free_ram = psutil.virtual_memory().free

        expected_ram_str = sizeof_fmt(expected_ram)
        free_ram_str = sizeof_fmt(free_ram)
        left_ram_str = sizeof_fmt(free_ram - expected_ram)

        if expected_ram < free_ram:
            logger.debug('The data precomputed for visualization takes '
                         f'{expected_ram_str} with {left_ram_str} of '
                         f'RAM left.')
            return True
        else:
            logger.debug(f'The precomputed data with {expected_ram_str} '
                         f'will surpass your current {free_ram_str} '
                         f'of free RAM.\n'
                         'Thus precompute will be set to False.\n'
                         '(If you want to precompute nevertheless, '
                         'then set precompute to True instead of "auto")')
            return False
def _process_data(self, data, start, stop, picks,
                  signals=None):
    """Process data like the base class, then negate it for display.

    The y-axis is inverted in this backend, so the data is flipped
    (in place) to appear upright.
    """
    processed = super()._process_data(data, start, stop, picks, signals)
    processed *= -1
    return processed
def _update_data(self):
    """Refresh ``self.mne.data``/``times`` for the shown range.

    Slices precomputed (already processed) data when available,
    otherwise loads and processes only the visible range.  Afterwards
    applies decimation, clipping and downsampling.
    """
    if self.mne.data_precomputed:
        # get start/stop-samples
        start, stop = self._get_start_stop()
        self.mne.times = self.mne.global_times[start:stop]
        self.mne.data = self.mne.global_data[:, start:stop]

        # remove DC locally (the mean depends on the shown range)
        if self.mne.remove_dc:
            self.mne.data = self.mne.data - \
                            self.mne.data.mean(axis=1, keepdims=True)
    else:
        # While data is not precomputed get data only from shown range and
        # process only those.
        super()._update_data()

    # Initialize decim (only data channels are decimated)
    self.mne.decim_data = np.ones_like(self.mne.picks)
    data_picks_mask = np.in1d(self.mne.picks, self.mne.picks_data)
    self.mne.decim_data[data_picks_mask] = self.mne.decim

    # Get decim_times
    if self.mne.decim != 1:
        # decim can vary by channel type,
        # so compute different `times` vectors.
        self.mne.decim_times = {decim_value: self.mne.times[::decim_value]
                                + self.mne.first_time for decim_value
                                in set(self.mne.decim_data)}

    # Apply clipping
    if self.mne.clipping == 'clamp':
        self.mne.data = np.clip(self.mne.data, -0.5, 0.5)
    elif self.mne.clipping is not None:
        # Copy before writing NaNs so precomputed data stays intact.
        self.mne.data = self.mne.data.copy()
        self.mne.data[abs(self.mne.data * self.mne.scale_factor)
                      > self.mne.clipping] = np.nan

    # Apply Downsampling (if enabled)
    self._apply_downsampling()
def _get_zscore(self, data):
    """Compute per-channel z-scores of *data* and cache them as RGBA.

    The data is first averaged down to at most one column per screen
    pixel, then z-scored per channel and converted into red (positive)
    / blue (negative) RGBA values stored in ``self.mne.zscore_rgba``.
    """
    # Reshape data to reasonable size for display
    if QApplication.desktop() is None:
        max_pixel_width = 3840  # default=UHD
    else:
        max_pixel_width = QApplication.desktop().screenGeometry().width()
    collapse_by = data.shape[1] // max_pixel_width
    data = data[:, :max_pixel_width * collapse_by]
    if collapse_by > 0:
        # Average consecutive samples into one display pixel.
        data = data.reshape(data.shape[0], max_pixel_width, collapse_by)
        data = data.mean(axis=2)
    z = zscore(data, axis=1)
    if z.size > 0:
        zmin = np.min(z, axis=1)
        zmax = np.max(z, axis=1)

        # Convert into RGBA
        zrgba = np.empty((*z.shape, 4))
        for row_idx, row in enumerate(z):
            for col_idx, value in enumerate(row):
                if math.isnan(value):
                    value = 0
                if value == 0:
                    rgba = [0, 0, 0, 0]
                elif value < 0:
                    # NOTE(review): value is negative here, so alpha is
                    # negative and wraps when cast to uint8 below —
                    # probably should be abs(value); confirm intended
                    # appearance.
                    alpha = int(255 * value / abs(zmin[row_idx]))
                    rgba = [0, 0, 255, alpha]
                else:
                    alpha = int(255 * value / zmax[row_idx])
                    rgba = [255, 0, 0, alpha]
                zrgba[row_idx, col_idx] = rgba
        # Cast to C-contiguous uint8 as expected by the image item.
        zrgba = np.require(zrgba, np.uint8, 'C')
        self.mne.zscore_rgba = zrgba
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ANNOTATIONS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _add_region(self, plot_onset, duration, description, region=None):
    """Create (or register) an annotation region item.

    Only regions with an edge inside the shown time range are added to
    the plot immediately; all regions are tracked in ``self.mne.regions``.
    """
    if not region:
        region = AnnotRegion(self.mne, description=description,
                             values=(plot_onset, plot_onset + duration))
    # Add only if onset or offset falls inside the shown range.
    # NOTE(review): a region spanning the whole view (onset before,
    # offset after) is not added here — confirm _update_annotations_xrange
    # covers that case.
    if (any([self.mne.t_start < v < self.mne.t_start + self.mne.duration
             for v in [plot_onset, plot_onset + duration]]) and
            region not in self.mne.plt.items):
        self.mne.plt.addItem(region)
        self.mne.plt.addItem(region.label_item)
    region.regionChangeFinished.connect(self._region_changed)
    region.gotSelected.connect(self._region_selected)
    region.removeRequested.connect(self._remove_region)
    self.mne.viewbox.sigYRangeChanged.connect(region.update_label_pos)
    self.mne.regions.append(region)
    region.update_label_pos()
def _remove_region(self, region, from_annot=True):
    """Remove an annotation region from plot and bookkeeping.

    With ``from_annot=True`` the matching entry is also deleted from
    the instance's Annotations object.
    """
    # Remove from shown regions
    if region.label_item in self.mne.viewbox.addedItems:
        self.mne.viewbox.removeItem(region.label_item)
    if region in self.mne.plt.items:
        self.mne.plt.removeItem(region)

    # Remove from all regions
    if region in self.mne.regions:
        self.mne.regions.remove(region)

    # Reset selected region
    if region == self.mne.selected_region:
        self.mne.selected_region = None

    # Remove from annotations
    if from_annot:
        idx = self._get_onset_idx(region.getRegion()[0])
        self.mne.inst.annotations.delete(idx)

    # Update Overview-Bar
    self.mne.overview_bar.update_annotations()
def _region_selected(self, region):
    """Mark *region* as the selected annotation region."""
    previous = self.mne.selected_region
    # Deselect the previously selected region (if it is a different one).
    if previous and previous != region:
        previous.select(False)
    self.mne.selected_region = region
    self.mne.fig_annotation.update_values(region)
def _get_onset_idx(self, plot_onset):
    """Return the annotation index whose onset matches *plot_onset*."""
    onset = _sync_onset(self.mne.inst, plot_onset, inverse=True)
    matches = np.where(self.mne.inst.annotations.onset == onset)[0]
    return matches[0]
def _region_changed(self, region):
    """Sync a dragged/resized region back into the Annotations object."""
    rgn = region.getRegion()
    region.select(True)
    # Look the annotation up by its onset *before* the change.
    idx = self._get_onset_idx(region.old_onset)

    # Update Spinboxes of Annot-Dock
    self.mne.fig_annotation.update_values(region)

    # Change annotations
    self.mne.inst.annotations.onset[idx] = _sync_onset(self.mne.inst,
                                                       rgn[0],
                                                       inverse=True)
    self.mne.inst.annotations.duration[idx] = rgn[1] - rgn[0]

    # Update overview-bar
    self.mne.overview_bar.update_annotations()
def _draw_annotations(self):
    """Intentionally a no-op in this backend."""
    # All regions are constantly added to the Scene and handled by Qt
    # which is faster than handling adding/removing in Python.
    pass
def _init_annot_mode(self):
    """Set up annotation handling: labels, colors, regions and the dock."""
    self.mne.annotations_visible = True
    self.mne.new_annotation_labels = self._get_annotation_labels()
    # Default to the first available description (if any).
    if len(self.mne.new_annotation_labels) > 0:
        self.mne.current_description = self.mne.new_annotation_labels[0]
    else:
        self.mne.current_description = None
    self._setup_annotation_colors()
    self.mne.regions = list()
    self.mne.selected_region = None

    # Initialize Annotation-Dock (create it only once)
    existing_dock = getattr(self.mne, 'fig_annotation', None)
    if existing_dock is None:
        self.mne.fig_annotation = AnnotationDock(self)
        self.addDockWidget(Qt.TopDockWidgetArea, self.mne.fig_annotation)
        self.mne.fig_annotation.setVisible(False)

    # Add annotations as regions
    for annot in self.mne.inst.annotations:
        plot_onset = _sync_onset(self.mne.inst, annot['onset'])
        duration = annot['duration']
        description = annot['description']
        self._add_region(plot_onset, duration, description)

    # Initialize showing annotation widgets
    self._change_annot_mode()
def _change_annot_mode(self):
    """Apply the current annotation-mode state to dock, regions and
    selection rectangle."""
    if not self.mne.annotation_mode:
        # Reset Widgets in Annotation-Figure
        self.mne.fig_annotation.reset()

    # Show Annotation-Dock if activated.
    self.mne.fig_annotation.setVisible(self.mne.annotation_mode)

    # Make Regions movable if activated and move into foreground
    for region in self.mne.regions:
        region.setMovable(self.mne.annotation_mode)
        if self.mne.annotation_mode:
            region.setZValue(2)
        else:
            region.setZValue(0)

    # Add/Remove selection-rectangle.
    if self.mne.selected_region:
        self.mne.selected_region.select(self.mne.annotation_mode)
def _toggle_annotation_fig(self):
    """Toggle annotation mode (not available for epochs)."""
    if self.mne.is_epochs:
        return
    self.mne.annotation_mode = not self.mne.annotation_mode
    self._change_annot_mode()
def _update_regions_visible(self):
    """Apply per-description visibility to all annotation regions."""
    visible = self.mne.visible_annotations
    for region in self.mne.regions:
        region.update_visible(visible[region.description])
    self.mne.overview_bar.update_annotations()
def _set_annotations_visible(self, visible):
    """Show or hide all annotation regions at once."""
    for descr in self.mne.visible_annotations:
        self.mne.visible_annotations[descr] = visible
    self._update_regions_visible()

    # Update Plot: add regions in view, or remove all shown regions.
    if visible:
        self._update_annotations_xrange((self.mne.t_start,
                                         self.mne.t_start +
                                         self.mne.duration))
    else:
        for region in [r for r in self.mne.regions
                       if r in self.mne.plt.items]:
            self.mne.plt.removeItem(region)
            self.mne.plt.removeItem(region.label_item)
def _toggle_annotations(self):
    """Flip global annotation visibility."""
    visible = not self.mne.annotations_visible
    self.mne.annotations_visible = visible
    self._set_annotations_visible(visible)
def _apply_update_projectors(self, toggle_all=False):
    """Update the projector; with *toggle_all* flip every projector.

    Projectors that are already applied to the data cannot be disabled
    and stay active.
    """
    if toggle_all:
        on = self.mne.projs_on
        new_state = np.full_like(on, not all(on))
        # Always activate applied projections
        new_state[self.mne.projs_active] = True
        self.mne.projs_on = new_state
    self._update_projector()
    # If data was precomputed it needs to be precomputed again.
    self._rerun_precompute()
    self._redraw()
def _toggle_proj_fig(self):
    """Open the projector dialog, or close it if already open."""
    if self.mne.fig_proj is not None:
        self.mne.fig_proj.close()
    else:
        ProjDialog(self, name='fig_proj')
def _toggle_all_projs(self):
    """Toggle all projectors, delegating to the dialog when it is open."""
    if self.mne.fig_proj is not None:
        self.mne.fig_proj.toggle_all()
    else:
        self._apply_update_projectors(toggle_all=True)
def _toggle_whitening(self):
    """Toggle noise-covariance whitening and refresh the display."""
    super()._toggle_whitening()
    # If data was precomputed it needs to be precomputed again.
    self._rerun_precompute()
    self._redraw()
def _toggle_settings_fig(self):
    """Open the settings dialog, or close it if already open.

    Fix: the close-branch previously closed and cleared ``fig_help``
    (copy-paste from ``_toggle_help_fig``) instead of ``fig_settings``.
    """
    if self.mne.fig_settings is None:
        SettingsDialog(self, name='fig_settings')
    else:
        self.mne.fig_settings.close()
        self.mne.fig_settings = None
def _toggle_help_fig(self):
    """Open the help dialog, or close it if already open."""
    if self.mne.fig_help is not None:
        self.mne.fig_help.close()
        self.mne.fig_help = None
    else:
        HelpDialog(self, name='fig_help')
def _set_butterfly(self, butterfly):
    """Enter/leave butterfly mode and adjust y-range, widgets and traces."""
    self.mne.butterfly = butterfly
    self._update_picks()
    self._update_data()

    if butterfly and self.mne.fig_selection is not None:
        # Selection mode: one row per selection group.
        self.mne.selection_ypos_dict.clear()
        selections_dict = self._make_butterfly_selections_dict()
        for idx, picks in enumerate(selections_dict.values()):
            for pick in picks:
                self.mne.selection_ypos_dict[pick] = idx + 1
        ymax = len(selections_dict) + 1
        self.mne.ymax = ymax
        self.mne.plt.setLimits(yMax=ymax)
        self.mne.plt.setYRange(0, ymax, padding=0)
    elif butterfly:
        # Plain butterfly: one row per channel type.
        ymax = len(self.mne.butterfly_type_order) + 1
        self.mne.ymax = ymax
        self.mne.plt.setLimits(yMax=ymax)
        self.mne.plt.setYRange(0, ymax, padding=0)
    else:
        # Normal mode: one row per channel.
        self.mne.ymax = len(self.mne.ch_order) + 1
        self.mne.plt.setLimits(yMax=self.mne.ymax)
        self.mne.plt.setYRange(self.mne.ch_start,
                               self.mne.ch_start + self.mne.n_channels + 1,
                               padding=0)

    if self.mne.fig_selection is not None:
        # Update Selection-Dialog
        self.mne.fig_selection._style_butterfly()

    # Set vertical scrollbar visible
    self.mne.ax_vscroll.setVisible(not butterfly or
                                   self.mne.fig_selection is not None)

    # update overview-bar
    self.mne.overview_bar.update_viewrange()

    # update ypos and color for butterfly-mode
    for trace in self.mne.traces:
        trace.update_color()
        trace.update_ypos()

    self._draw_traces()
def _toggle_butterfly(self):
    """Toggle butterfly mode (disabled for ICA instances)."""
    if self.mne.instance_type == 'ica':
        return
    self._set_butterfly(not self.mne.butterfly)
def _toggle_dc(self):
    """Toggle DC removal and redraw the traces."""
    self.mne.remove_dc = not self.mne.remove_dc
    self._redraw()
def _toggle_epoch_histogram(self):
    """Show the epoch peak-to-peak histogram in a dialog.

    Fix: guard against a ``None`` figure and non-epochs instances, like
    the sibling ``_toggle_epoch_histogramm`` already does — previously a
    ``None`` return would crash in ``_get_dlg_from_mpl``.
    """
    if not self.mne.is_epochs:
        return
    fig = self._create_epoch_histogram()
    if fig is not None:
        self._get_dlg_from_mpl(fig)
def _set_events_visible(self, visible):
    """Show or hide all event lines."""
    for event_line in self.mne.event_lines:
        event_line.setVisible(visible)

    # Update Plot: add lines in view, or remove all shown lines.
    if visible:
        self._update_events_xrange((self.mne.t_start,
                                    self.mne.t_start +
                                    self.mne.duration))
    else:
        for event_line in [evl for evl in self.mne.event_lines
                           if evl in self.mne.plt.items]:
            self.mne.plt.removeItem(event_line)
    self.mne.overview_bar.update_events()
def _toggle_events(self):
    """Toggle event-line visibility, if events are available."""
    if self.mne.event_nums is None:
        return
    self.mne.events_visible = not self.mne.events_visible
    self._set_events_visible(self.mne.events_visible)
def _toggle_time_format(self):
    """Switch the x-axis between clock time and seconds."""
    if self.mne.time_format != 'float':
        self.mne.time_format = 'float'
        self.mne.time_axis.setLabel(text='Time', units='s')
    else:
        self.mne.time_format = 'clock'
        self.mne.time_axis.setLabel(text='Time')
    self._update_yaxis_labels()
def _toggle_fullscreen(self):
    """Switch between fullscreen and normal window mode."""
    if self.isFullScreen():
        target = self.showNormal
    else:
        target = self.showFullScreen
    target()
def _toggle_antialiasing(self):
    """Toggle antialiased drawing and redraw."""
    self.mne.antialiasing = not self.mne.antialiasing
    self._redraw()
def _toggle_overview_bar(self):
    """Show/hide the overview bar."""
    visible = not self.mne.show_overview_bar
    self.mne.show_overview_bar = visible
    self.mne.overview_bar.setVisible(visible)
def _toggle_zenmode(self):
    """Toggle zen mode: hide/show scrollbars and the toolbar."""
    visible = not self.mne.scrollbars_visible
    self.mne.scrollbars_visible = visible
    self.mne.ax_hscroll.setVisible(visible)
    self.mne.ax_vscroll.setVisible(visible)
    self.mne.toolbar.setVisible(visible)
def _new_child_figure(self, fig_name, window_title, **kwargs):
    """Create a matplotlib Figure carrying name/window-title metadata."""
    from matplotlib.figure import Figure
    fig = Figure(**kwargs)
    # Forward identifying metadata so dialogs can pick it up later.
    for attr, value in (('fig_name', fig_name), ('title', window_title)):
        if value is not None:
            setattr(fig, attr, value)
    return fig
def _get_widget_from_mpl(self, fig):
    """Wrap a matplotlib figure in a focused Qt canvas widget."""
    canvas = FigureCanvasQTAgg(fig)
    canvas.setFocusPolicy(Qt.StrongFocus | Qt.WheelFocus)
    canvas.setFocus()
    # Carry over identifying metadata if present.
    for attr in ('fig_name', 'title'):
        if hasattr(fig, attr):
            setattr(canvas, attr, getattr(fig, attr))
    return canvas
def _get_dlg_from_mpl(self, fig):
    """Show a matplotlib figure inside a dialog window."""
    canvas = self._get_widget_from_mpl(fig)
    # Pass window title and fig_name on (if they were set).
    name = getattr(canvas, 'fig_name', None)
    title = getattr(canvas, 'title', None)
    dlg = _BaseDialog(self, widget=canvas, title=title, name=name)
    dlg.show()
def _create_ch_context_fig(self, idx):
    """Open the channel-context figure for channel *idx* in a dialog."""
    fig = super()._create_ch_context_fig(idx)
    if fig is None:
        return
    self._get_dlg_from_mpl(fig)
def _toggle_epoch_histogramm(self):
    """Show the epoch peak-to-peak histogram (epochs only).

    NOTE(review): near-duplicate of ``_toggle_epoch_histogram`` (note
    the spelling) — confirm which one is wired to shortcuts/menus and
    consolidate.
    """
    if self.mne.is_epochs:
        fig = self._create_epoch_histogram()
        if fig is not None:
            self._get_dlg_from_mpl(fig)
def _create_selection_fig(self):
    """Open a SelectionDialog unless one already exists."""
    already_open = any(isinstance(fig, SelectionDialog)
                       for fig in self.mne.child_figs)
    if not already_open:
        SelectionDialog(self)
def message_box(self, text, info_text=None, buttons=None,
                default_button=None, icon=None, modal=True):
    """Configure and show the shared QMessageBox.

    Returns the clicked-button code when shown modally (and not in
    test-mode), otherwise None.
    """
    self.msg_box.setText(f'<font size="+2"><b>{text}</b></font>')
    if info_text is not None:
        self.msg_box.setInformativeText(info_text)
    if buttons is not None:
        self.msg_box.setStandardButtons(buttons)
    if default_button is not None:
        self.msg_box.setDefaultButton(default_button)
    if icon is not None:
        self.msg_box.setIcon(icon)

    # Allow interacting with message_box in test-mode.
    # Set modal=False only if no return value is expected.
    self.msg_box.setModal(False if self.test_mode else modal)
    if self.test_mode or not modal:
        self.msg_box.show()
    else:
        return self.msg_box.exec()
def keyPressEvent(self, event):
    """Dispatch a key press to the slot registered in keyboard_shortcuts."""
    # On MacOs additionally KeypadModifier is set when arrow-keys
    # are pressed.
    # On Unix GroupSwitchModifier is set when ctrl is pressed.
    # To preserve cross-platform consistency the following comparison
    # of the modifier-values is done.
    # modifiers need to be exclusive
    modifiers = {
        'Ctrl': '4' in hex(int(event.modifiers())),
        # 33554432 == 0x02000000 == Qt.ShiftModifier
        'Shift': int(event.modifiers()) == 33554432
    }
    for key_name in self.mne.keyboard_shortcuts:
        key_dict = self.mne.keyboard_shortcuts[key_name]
        if key_dict['qt_key'] == event.key() and 'slot' in key_dict:

            mod_idx = 0
            # Get modifier
            if 'modifier' in key_dict:
                mods = [modifiers[mod] for mod in modifiers]
                if any(mods):
                    # No multiple modifiers supported yet
                    mod = [mod for mod in modifiers if modifiers[mod]][0]
                    if mod in key_dict['modifier']:
                        mod_idx = key_dict['modifier'].index(mod)

            # Fall back to the first slot when there is none registered
            # for the active modifier.
            slot_idx = mod_idx if mod_idx < len(key_dict['slot']) else 0
            slot = key_dict['slot'][slot_idx]

            if 'parameter' in key_dict:
                param_idx = (mod_idx if mod_idx <
                             len(key_dict['parameter']) else 0)
                slot(key_dict['parameter'][param_idx])
            else:
                slot()

            break
def _draw_traces(self):
    """Redraw all traces by pushing the current data into them."""
    for trace in self.mne.traces:
        trace.update_data()
def _get_size(self):
    """Return the window size in inches as (width, height)."""
    return (self.width() / self.logicalDpiX(),
            self.height() / self.logicalDpiY())
def _fake_keypress(self, key, fig=None):
    """Simulate a key press for testing.

    Upper-case letters and a 'shift+' prefix are translated into the
    lower-case key plus a Shift modifier.
    """
    fig = fig or self

    if key.isupper():
        key = key.lower()
        modifier = Qt.ShiftModifier
    elif key.startswith('shift+'):
        key = key[6:]
        modifier = Qt.ShiftModifier
    else:
        modifier = Qt.NoModifier

    # Use pytest-qt's exception-hook to surface exceptions raised
    # inside the Qt event loop.
    with capture_exceptions() as exceptions:
        QTest.keyPress(fig, self.mne.keyboard_shortcuts[key]['qt_key'],
                       modifier)

    for exc in exceptions:
        # Fix: message typo ("There as been" -> "There has been").
        raise RuntimeError(f'There has been an {exc[0]} inside the Qt '
                           f'event loop (look above for traceback).')
def _fake_click(self, point, add_points=None, fig=None, ax=None,
                xform='ax', button=1, kind='press'):
    """Simulate a mouse interaction for testing.

    *xform* selects how *point* (and *add_points*) are interpreted:
    'ax' = fraction of the widget size, 'data' = data coordinates of
    the viewbox, 'none'/None = raw coordinates.  *kind* is one of
    'press', 'release', 'motion' or 'drag'.
    """
    add_points = add_points or list()
    # Make sure the window is fully exposed and settled before clicking.
    QTest.qWaitForWindowExposed(self)
    QTest.qWait(10)

    # Map matplotlib-style button numbers onto Qt buttons.
    if button == 1:
        button = Qt.LeftButton
    else:
        button = Qt.RightButton

    fig = ax or fig or self.mne.view

    if xform == 'ax':
        # Fractional coordinates of the widget (y is inverted).
        view_width = fig.width()
        view_height = fig.height()
        x = view_width * point[0]
        y = view_height * (1 - point[1])
        point = Point(x, y)
        for idx, apoint in enumerate(add_points):
            x2 = view_width * apoint[0]
            y2 = view_height * (1 - apoint[1])
            add_points[idx] = Point(x2, y2)
    elif xform == 'data':
        # Data coordinates are mapped through the viewbox onto the view.
        fig = self.mne.view
        point = self.mne.viewbox.mapViewToScene(Point(*point))
        for idx, apoint in enumerate(add_points):
            add_points[idx] = self.mne.viewbox.mapViewToScene(
                Point(*apoint))
    elif xform == 'none' or xform is None:
        if isinstance(point, (tuple, list)):
            point = Point(*point)
        else:
            point = Point(point)
        for idx, apoint in enumerate(add_points):
            if isinstance(apoint, (tuple, list)):
                add_points[idx] = Point(*apoint)
            else:
                add_points[idx] = Point(apoint)

    # Use pytest-qt's exception-hook to surface Qt-loop exceptions.
    with capture_exceptions() as exceptions:
        widget = fig.viewport() if isinstance(fig, QGraphicsView) else fig
        if kind == 'press':
            # always click because most interactivity comes form
            # mouseClickEvent from pyqtgraph (just press doesn't suffice
            _mouseClick(widget=widget, pos=point, button=button)
        elif kind == 'release':
            _mouseRelease(widget=widget, pos=point, button=button)
        elif kind == 'motion':
            _mouseMove(widget=widget, pos=point, buttons=button)
        elif kind == 'drag':
            _mouseDrag(widget=widget, positions=[point] + add_points,
                       button=button)

    for exc in exceptions:
        # Fix: message typo ("There as been" -> "There has been").
        raise RuntimeError(f'There has been an {exc[0]} inside the Qt '
                           f'event loop (look above for traceback).')

    # Give Qt time to process the synthesized events.
    QTest.qWait(50)
def _fake_scroll(self, x, y, step, fig=None):
    """Testing helper: emulate scrolling by *step* (x/y/fig are unused)."""
    self.vscroll(step)
def _click_ch_name(self, ch_index, button):
    """Simulate a click on a channel-name label (testing helper)."""
    self.mne.channel_axis.repaint()
    # Wait because channel-axis may need time
    # (came up with test_epochs::test_plot_epochs_clicks)
    QTest.qWait(100)
    if not self.mne.butterfly:
        ch_name = self.mne.ch_names[self.mne.picks[ch_index]]
        xrange, yrange = self.mne.channel_axis.ch_texts[ch_name]
        # Click the center of the label's bounding box.
        x = np.mean(xrange)
        y = np.mean(yrange)

        self._fake_click((x, y), fig=self.mne.view, button=button,
                         xform='none')
def _update_trace_offsets(self):
    """No-op — presumably kept for API compatibility with the
    matplotlib backend; confirm before removing."""
    pass
def _resize_by_factor(self, factor):
    """No-op — presumably kept for API compatibility with the
    matplotlib backend; confirm before removing."""
    pass
def _get_ticklabels(self, orientation):
    """Return tick labels of the time ('x') or channel axis."""
    axis = (self.mne.time_axis if orientation == 'x'
            else self.mne.channel_axis)
    return list(axis.get_labels())
def _get_scale_bar_texts(self):
    """Return the text of all scalebar labels as a tuple."""
    items = self.mne.scalebar_texts.values()
    return tuple(item.toPlainText() for item in items)
def show(self):
    """Show the window, raising it like matplotlib if so configured."""
    super().show()
    try:
        from matplotlib import rcParams
        raise_window = rcParams['figure.raise_window']
    except (ImportError, KeyError):
        # Fix: also catch KeyError — old matplotlib versions do not
        # know the 'figure.raise_window' rcParam.
        raise_window = True
    if raise_window:
        self.activateWindow()
        self.raise_()
def _close_event(self, fig=None):
    """Close *fig* (default: self) — matplotlib canvases via
    close_event, Qt widgets via close()."""
    fig = fig or self
    if hasattr(fig, 'canvas'):
        try:
            fig.canvas.close_event()
        except ValueError:  # old mpl with Qt
            pass  # pragma: no cover
    else:
        fig.close()
def closeEvent(self, event):
    """Clean up on window close: disconnect signals, persist settings,
    close child figures and release the load thread."""
    event.accept()
    if hasattr(self, 'mne'):
        # Explicit disconnects to avoid reference cycles that gc can't
        if hasattr(self.mne, 'plt'):
            _disconnect(self.mne.plt.sigXRangeChanged)
            _disconnect(self.mne.plt.sigYRangeChanged)
        if hasattr(self.mne, 'toolbar'):
            for action in self.mne.toolbar.actions():
                _disconnect(action.triggered)
        # Persist user-facing settings via QSettings.
        for qsetting in qsettings_params:
            value = getattr(self.mne, qsetting)
            QSettings().setValue(qsetting, value)
        # Drop attributes that hold Qt objects / closures.
        for attr in ('keyboard_shortcuts', 'traces', 'plt', 'toolbar'):
            if hasattr(self.mne, attr):
                delattr(self.mne, attr)
        if hasattr(self.mne, 'child_figs'):
            for fig in self.mne.child_figs:
                fig.close()
            self.mne.child_figs.clear()
        for attr in ('traces', 'event_lines', 'regions'):
            getattr(self.mne, attr, []).clear()
        if getattr(self.mne, 'vline', None) is not None:
            # vline is a list for epochs, a single item otherwise.
            if self.mne.is_epochs:
                for vl in self.mne.vline:
                    _disconnect(vl.sigPositionChangeFinished)
                self.mne.vline.clear()
            else:
                _disconnect(self.mne.vline.sigPositionChangeFinished)
    if getattr(self, 'load_thread', None) is not None:
        self.load_thread.clean()
        self.load_thread = None

    # Unregister this browser instance.
    if self in _browser_instances:
        _browser_instances.remove(self)
    self._close(event)
    self.gotClosed.emit()
    self.deleteLater()
def _get_n_figs():
    """Count visible top-level Qt windows (after a short settle wait)."""
    QTest.qWait(100)
    visible = [win for win in QApplication.topLevelWindows()
               if win.isVisible()]
    return len(visible)
def _close_all():
    """Close every open top-level Qt window, if any."""
    if QApplication.topLevelWindows():
        QApplication.closeAllWindows()
def _mousePress(widget, pos, button, modifier=None):
    """Deliver a synthetic mouse-button press to *widget* at *pos*."""
    if modifier is None:
        modifier = Qt.KeyboardModifier.NoModifier
    press = QMouseEvent(QEvent.Type.MouseButtonPress, pos, button,
                        Qt.MouseButton.NoButton, modifier)
    QApplication.sendEvent(widget, press)
def _mouseRelease(widget, pos, button, modifier=None):
    """Deliver a synthetic mouse-button release to *widget* at *pos*."""
    if modifier is None:
        modifier = Qt.KeyboardModifier.NoModifier
    release = QMouseEvent(QEvent.Type.MouseButtonRelease, pos,
                          button, Qt.MouseButton.NoButton, modifier)
    QApplication.sendEvent(widget, release)
def _mouseMove(widget, pos, buttons=None, modifier=None):
    """Deliver a synthetic mouse-move to *widget* at *pos*."""
    if buttons is None:
        buttons = Qt.MouseButton.NoButton
    if modifier is None:
        modifier = Qt.KeyboardModifier.NoModifier
    move = QMouseEvent(QEvent.Type.MouseMove, pos,
                       Qt.MouseButton.NoButton, buttons, modifier)
    QApplication.sendEvent(widget, move)
def _mouseClick(widget, pos, button, modifier=None):
    """Simulate a full click: move, press, then release."""
    _mouseMove(widget, pos)
    _mousePress(widget, pos, button, modifier)
    _mouseRelease(widget, pos, button, modifier)
def _mouseDrag(widget, positions, button, modifier=None):
    """Simulate a drag along *positions*: press at the first point,
    move through the rest, release at the last."""
    _mouseMove(widget, positions[0])
    _mousePress(widget, positions[0], button, modifier)
    # Wait to significantly increase the probability of the drag
    # being detected by Qt/pyqtgraph.
    QTest.qWait(10)
    for pos in positions[1:]:
        _mouseMove(widget, pos, button, modifier)
    _mouseRelease(widget, positions[-1], button, modifier)
def _init_browser(**kwargs):
    """Create a PyQtGraphBrowser after initializing the Qt application."""
    setConfigOption('enableExperimental', True)
    _init_mne_qtapp(pg_app=True)
    return PyQtGraphBrowser(**kwargs)
| true | true |
1c39772396aa4a5b2ffc82f65daf4a0da685acbb | 1,081 | py | Python | Datasets/NumpyGenerator/NumpyRandomNormalDriftGenerator.py | Lucciola111/stream_autoencoder_windowing | 5456b07bd20220c987598db2cdb832d8195e1575 | [
"MIT"
] | 4 | 2021-09-16T05:50:25.000Z | 2021-12-31T07:04:55.000Z | Datasets/NumpyGenerator/NumpyRandomNormalDriftGenerator.py | Lucciola111/stream_autoencoder_windowing | 5456b07bd20220c987598db2cdb832d8195e1575 | [
"MIT"
] | null | null | null | Datasets/NumpyGenerator/NumpyRandomNormalDriftGenerator.py | Lucciola111/stream_autoencoder_windowing | 5456b07bd20220c987598db2cdb832d8195e1575 | [
"MIT"
] | 1 | 2021-12-16T06:53:08.000Z | 2021-12-16T06:53:08.000Z | from Datasets.NumpyGenerator.NumpyRandomNormalStreamGenerator import numpy_random_normal_stream_generator
def numpy_random_normal_drift_generator(
        data_stream, mean_broken, var_broken, n_dimensions_broken, start_dimensions_broken):
    """Inject a sudden drift into selected dimensions of a data stream.

    Parameters
    ----------
    data_stream: data stream (2D array) where drift should be introduced
    mean_broken: the mean of the drift
    var_broken: the standard deviation of the drift
    n_dimensions_broken: number of consecutive affected dimensions
    start_dimensions_broken: first affected dimension index

    Returns the data stream with the affected columns overwritten
    (modified in place).
    """
    n_instances = len(data_stream)
    # Generate replacement columns drawn from the broken distribution.
    drifted = numpy_random_normal_stream_generator(
        mean=mean_broken, var=var_broken, n_instances=n_instances,
        n_dimensions=n_dimensions_broken)
    # Overwrite the selected dimension block with the drifted data.
    stop = start_dimensions_broken + n_dimensions_broken
    data_stream[:, start_dimensions_broken:stop] = drifted
    return data_stream
| 37.275862 | 111 | 0.779833 | from Datasets.NumpyGenerator.NumpyRandomNormalStreamGenerator import numpy_random_normal_stream_generator
def numpy_random_normal_drift_generator(
        data_stream, mean_broken, var_broken, n_dimensions_broken, start_dimensions_broken):
    """Inject a sudden drift into selected dimensions of a data stream.

    NOTE(review): duplicate of the documented version above (dataset
    residue) — consider removing one copy.
    """
    n_instances = len(data_stream)
    dimensions_broken = numpy_random_normal_stream_generator(
        mean=mean_broken, var=var_broken, n_instances=n_instances, n_dimensions=n_dimensions_broken)
    data_stream[:, start_dimensions_broken:(start_dimensions_broken + n_dimensions_broken)] = dimensions_broken
    return data_stream
1c3979aeaf8ca52f2b4e129cade43c4fe8b17290 | 1,730 | py | Python | table_enforcer/utils/validate/decorators.py | xguse/table_enforcer | f3137839574bf8ea933a14ea16a8acba45e3e0c3 | [
"MIT"
] | 13 | 2017-11-16T23:24:17.000Z | 2021-05-28T01:05:31.000Z | table_enforcer/utils/validate/decorators.py | xguse/table_enforcer | f3137839574bf8ea933a14ea16a8acba45e3e0c3 | [
"MIT"
] | 1 | 2019-09-26T18:34:21.000Z | 2021-10-12T17:18:24.000Z | table_enforcer/utils/validate/decorators.py | xguse/table_enforcer | f3137839574bf8ea933a14ea16a8acba45e3e0c3 | [
"MIT"
] | 1 | 2017-11-17T17:18:31.000Z | 2017-11-17T17:18:31.000Z | """Provide decoration functions to augment the behavior of validator functions."""
import functools
def minmax(low, high):
    """Validator factory: items must satisfy low <= x <= high."""
    def decorator(function):
        """Wrap *function* so its returned series is range-checked."""
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            """Return a boolean series marking in-range items."""
            series = function(*args, **kwargs)
            return (low <= series) & (series <= high)
        return wrapper
    return decorator
def choice(choices):
    """Validator factory: items must be members of *choices*."""
    def decorator(function):
        """Wrap *function* so its returned series is membership-checked."""
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            """Return a boolean series marking allowed items."""
            allowed = set(choices)
            return function(*args, **kwargs).isin(allowed)
        return wrapper
    return decorator
def bounded_length(low, high=None):
    """Validator factory: item lengths must satisfy low <= len(x) <= high.

    If high is None, require length exactly equal to *low*.
    """
    def decorator(function):
        """Wrap *function* so its returned series is length-checked."""
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            """Return a boolean series marking items with valid length."""
            lengths = function(*args, **kwargs).apply(len)
            if high is None:
                return lengths == low
            return (low <= lengths) & (lengths <= high)
        return wrapper
    return decorator
| 28.360656 | 82 | 0.569942 | import functools
def minmax(low, high):
    """Validator factory: items must satisfy low <= x <= high.

    NOTE(review): duplicate of the documented ``minmax`` above (dataset
    residue) — consider removing one copy.
    """
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            series = function(*args, **kwargs)
            lo_pass = low <= series
            hi_pass = series <= high
            return lo_pass & hi_pass
        return wrapper
    return decorator
def choice(choices):
    """Validator factory: items must be members of *choices*.

    NOTE(review): duplicate of the documented ``choice`` above (dataset
    residue) — consider removing one copy.
    """
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            series = function(*args, **kwargs)
            return series.isin(set(choices))
        return wrapper
    return decorator
def bounded_length(low, high=None):
    """Validator factory: item lengths must satisfy low <= len(x) <= high
    (exact length *low* when high is None).

    NOTE(review): duplicate of the documented ``bounded_length`` above
    (dataset residue) — consider removing one copy.
    """
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            series = function(*args, **kwargs)
            if high is None:
                return series.apply(lambda x: len(x) == low)
            else:
                lo_pass = series.apply(lambda x: low <= len(x))
                hi_pass = series.apply(lambda x: len(x) <= high)
                return lo_pass & hi_pass
        return wrapper
    return decorator
| true | true |
1c397c1c2056b008156c3000c97255a269a556fd | 3,543 | py | Python | index.py | AaryanSinhaRoy/link-saver | 1970c3d1733d76891b59e3527662d0502456e379 | [
"MIT"
] | null | null | null | index.py | AaryanSinhaRoy/link-saver | 1970c3d1733d76891b59e3527662d0502456e379 | [
"MIT"
] | null | null | null | index.py | AaryanSinhaRoy/link-saver | 1970c3d1733d76891b59e3527662d0502456e379 | [
"MIT"
] | null | null | null | from flask import Flask, render_template,url_for, request, session, redirect
import sqlite3
import requests
from bs4 import BeautifulSoup
app = Flask(__name__)
app.secret_key='voat'
create_table="create table if not exists users ( username varchar(255), password varchar(255))"
conn=sqlite3.connect("database.db")
cur=conn.cursor()
cur.execute(create_table)
conn.commit()
@app.route('/')
def home():
return render_template("landingpage.html")
@app.route('/linksaver')
def linksaver():
if session['username']:
conn=sqlite3.connect("database.db")
cur=conn.cursor()
resdata=cur.execute("select linkname,thelink,username,ROWID from links where username=?",(session['username'],))
return render_template("index.html",resdata=resdata)
else:
return redirect("/")
@app.route('/login')
def loginpage():
return render_template("login.html")
@app.route('/signup')
def signuppage():
return render_template("signup.html")
@app.route('/process_signup', methods=["POST"])
def signup():
    """Create a new account unless the requested username is already taken.

    Reads ``usrname``/``pswrd`` from the POSTed form. On success renders the
    login page; otherwise returns a plain "username already taken" message.
    """
    username = request.form['usrname']
    password = request.form['pswrd']
    # SECURITY NOTE(review): the password is stored in plaintext; it should be
    # hashed (e.g. werkzeug.security.generate_password_hash) before insertion.
    conn = sqlite3.connect("database.db")
    try:
        cur = conn.cursor()
        usrs = cur.execute("select * from users where username=?", (username,))
        # Any existing row means the name is taken. The previous `== 1` check
        # silently allowed re-registration when bad data had already produced
        # more than one row for the same name.
        if len(list(usrs)) >= 1:
            return "username already taken"
        cur.execute(
            "insert into users (username, password) values(?,?)",
            (username, password),
        )
        conn.commit()
        return render_template("login.html")
    finally:
        # the original handler leaked the connection on every request
        conn.close()
@app.route('/process_login', methods=["POST"])
def login():
    """Authenticate the POSTed credentials and start a session on success."""
    error=False
    username=request.form['usrname']
    password=request.form['pswrd']
    # SECURITY NOTE(review): the password is matched in plaintext against the
    # stored value; passwords should be hashed at signup and verified with a
    # hash check here instead.
    conn=sqlite3.connect("database.db")
    cur=conn.cursor()
    res=cur.execute("select * from users where username=? and password=?",(username,password))
    if len(list(res))>=1:
        # at least one matching row: remember the user in the session
        session['username']=username
        return redirect(url_for("linksaver"))
    else:
        error=True
        # re-render the login page with the error flag for the template
        return render_template("login.html",error=error)
@app.route('/logout')
def logout():
session.pop('username')
return redirect(url_for('home'))
@app.route('/newlink', methods=['POST'])
def newlink():
    """Fetch the submitted URL's <title> and store it as a new saved link."""
    # NOTE(review): session['username'] raises KeyError for visitors with no
    # session entry at all; session.get('username') would be safer.
    if session['username']:
        if request.method== "POST":
            try:
                linkerror=False
                flag=False
                thelink=request.form['thelink']
                linkname=""
                # NOTE(review): `flag` is immediately overwritten with a message
                # string here, but only its truthiness is used below.
                flag="an error occured, please try again"
                page=requests.get(thelink)
                soup=BeautifulSoup(page.text,'html.parser')
                linkname=soup.title.get_text()
                conn=sqlite3.connect("database.db")
                cur=conn.cursor()
                res=cur.execute("insert into links (linkname,thelink,username) values(?,?,?)",(linkname,thelink,session['username']))
                conn.commit()
                return redirect(url_for("linksaver"))
            except requests.exceptions.MissingSchema:
                # the URL lacked a scheme (e.g. "example.com"): show a form error
                linkerror=True
                return render_template("index.html",linkerror=linkerror)
            # NOTE(review): this bare `except` swallows every other failure
            # (network errors, pages without a <title>, DB errors) and hides
            # the cause; it should catch specific exceptions and log them.
            except:
                flag=True
                return render_template("index.html",flag=flag)
    else:
        return redirect("/login")
@app.route('/deletelink/<id>', methods=['GET','POST'])
def deletelink(id):
    """Delete the saved link whose ROWID is *id*, then return to the list page."""
    # SECURITY NOTE(review): there is no session or ownership check here --
    # any visitor who knows or guesses a ROWID can delete any user's link.
    # Compare with linksaver(), which filters rows by session['username'].
    # Also, the parameter `id` shadows the builtin of the same name.
    conn=sqlite3.connect("database.db")
    cur=conn.cursor()
    res=cur.execute("delete from links where ROWID=?",(id,))
    conn.commit()
    return redirect(url_for("linksaver"))
if __name__ == '__main__':
app.run(debug=True)
| 31.633929 | 130 | 0.62433 | from flask import Flask, render_template,url_for, request, session, redirect
import sqlite3
import requests
from bs4 import BeautifulSoup
app = Flask(__name__)
app.secret_key='voat'
create_table="create table if not exists users ( username varchar(255), password varchar(255))"
conn=sqlite3.connect("database.db")
cur=conn.cursor()
cur.execute(create_table)
conn.commit()
@app.route('/')
def home():
return render_template("landingpage.html")
@app.route('/linksaver')
def linksaver():
if session['username']:
conn=sqlite3.connect("database.db")
cur=conn.cursor()
resdata=cur.execute("select linkname,thelink,username,ROWID from links where username=?",(session['username'],))
return render_template("index.html",resdata=resdata)
else:
return redirect("/")
@app.route('/login')
def loginpage():
return render_template("login.html")
@app.route('/signup')
def signuppage():
return render_template("signup.html")
@app.route('/process_signup', methods=["POST"])
def signup():
username=request.form['usrname']
password=request.form['pswrd']
conn=sqlite3.connect("database.db")
cur=conn.cursor()
usrs=cur.execute("select * from users where username=?",(username,))
if len(list(usrs))==1:
return "username already taken"
else:
cur.execute("insert into users (username, password) values(?,?)",(username,password))
conn.commit()
return render_template("login.html",)
@app.route('/process_login', methods=["POST"])
def login():
error=False
username=request.form['usrname']
password=request.form['pswrd']
conn=sqlite3.connect("database.db")
cur=conn.cursor()
res=cur.execute("select * from users where username=? and password=?",(username,password))
if len(list(res))>=1:
session['username']=username
return redirect(url_for("linksaver"))
else:
error=True
return render_template("login.html",error=error)
@app.route('/logout')
def logout():
session.pop('username')
return redirect(url_for('home'))
@app.route('/newlink', methods=['POST'])
def newlink():
if session['username']:
if request.method== "POST":
try:
linkerror=False
flag=False
thelink=request.form['thelink']
linkname=""
flag="an error occured, please try again"
page=requests.get(thelink)
soup=BeautifulSoup(page.text,'html.parser')
linkname=soup.title.get_text()
conn=sqlite3.connect("database.db")
cur=conn.cursor()
res=cur.execute("insert into links (linkname,thelink,username) values(?,?,?)",(linkname,thelink,session['username']))
conn.commit()
return redirect(url_for("linksaver"))
except requests.exceptions.MissingSchema:
linkerror=True
return render_template("index.html",linkerror=linkerror)
except:
flag=True
return render_template("index.html",flag=flag)
else:
return redirect("/login")
@app.route('/deletelink/<id>', methods=['GET','POST'])
def deletelink(id):
conn=sqlite3.connect("database.db")
cur=conn.cursor()
res=cur.execute("delete from links where ROWID=?",(id,))
conn.commit()
return redirect(url_for("linksaver"))
if __name__ == '__main__':
app.run(debug=True)
| true | true |
1c397ccaa82943aaaf39168260a36d7c20628590 | 487 | py | Python | interview/leet/189_Rotate_Array.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2020-10-12T13:33:29.000Z | 2020-10-12T13:33:29.000Z | interview/leet/189_Rotate_Array.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | null | null | null | interview/leet/189_Rotate_Array.py | eroicaleo/LearningPython | 297d46eddce6e43ce0c160d2660dff5f5d616800 | [
"MIT"
] | 1 | 2016-11-09T07:28:45.000Z | 2016-11-09T07:28:45.000Z | #!/usr/bin/env python3
import math
class Solution:
    def rotate(self, nums, k):
        """Rotate *nums* right by *k* positions in place and return it.

        Uses the cycle-decomposition ("juggling") algorithm: the array splits
        into gcd(k, len(nums)) cycles, each shifted with O(1) extra memory.

        The original crashed on an empty list: with len(nums) == 0,
        math.gcd(k, 0) == k, so the loop indexed into the empty list (and the
        modulo below divided by zero). Guard that case and normalize k first.
        """
        l = len(nums)
        if l == 0:
            return nums  # nothing to rotate; also avoids modulo-by-zero
        k %= l  # rotating by any multiple of the length is a no-op
        if k == 0:
            return nums
        for start in range(math.gcd(k, l)):
            prev = nums[start]
            pos = (start + k) % l
            while pos != start:
                # drop the carried value here, pick up the displaced one
                nums[pos], prev = prev, nums[pos]
                pos = (pos + k) % l
            nums[start] = prev
        return nums
# Ad-hoc manual check for Solution.rotate.
# NOTE(review): the first nums/k pair is immediately overwritten by the second
# assignment below, so only the [-1, -100, 3, 99] case actually runs.
nums = [1,2,3,4,5,6,7]
k = 3
nums = [-1,-100,3,99]
k = 2
sol = Solution()
print(sol.rotate(nums, k))
| 20.291667 | 44 | 0.439425 |
import math
class Solution:
def rotate(self, nums, k):
l = len(nums)
d = math.gcd(k, l)
for i in range(d):
p, q, prev = i, (i+k)%l, nums[i]
while q != i:
tmp = nums[q]
nums[q] = prev
p, q, prev = q, (q+k)%l, tmp
nums[i] = prev
return nums
nums = [1,2,3,4,5,6,7]
k = 3
nums = [-1,-100,3,99]
k = 2
sol = Solution()
print(sol.rotate(nums, k))
| true | true |
1c397d5a4701f90d305728e12748e4baba35860c | 151 | py | Python | SystemyMultimedialne/Lab07/tempCodeRunnerFile.py | arte00/informatyka | 642574f7c235d016b7a143a9957b26ddcaa09bee | [
"MIT"
] | null | null | null | SystemyMultimedialne/Lab07/tempCodeRunnerFile.py | arte00/informatyka | 642574f7c235d016b7a143a9957b26ddcaa09bee | [
"MIT"
] | null | null | null | SystemyMultimedialne/Lab07/tempCodeRunnerFile.py | arte00/informatyka | 642574f7c235d016b7a143a9957b26ddcaa09bee | [
"MIT"
] | null | null | null | sd.play(data[0:_max], samplerate=freq, blocking=True)
# sd.play(data1, samplerate=freq, blocking=True)
# sd.play(data2, samplerate=freq, blocking=True) | 50.333333 | 53 | 0.761589 | sd.play(data[0:_max], samplerate=freq, blocking=True)
| true | true |
1c397d6a6a1b5cda7f74f03891a7c53f87684eb5 | 52,992 | py | Python | pandas/tests/frame/test_reductions.py | k-fillmore/pandas | 67d4cae17bec45e84b9cf51bcf4fb5bbe293b26f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 13 | 2015-04-09T06:21:18.000Z | 2021-02-06T05:08:09.000Z | pandas/tests/frame/test_reductions.py | k-fillmore/pandas | 67d4cae17bec45e84b9cf51bcf4fb5bbe293b26f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2016-01-15T13:49:34.000Z | 2016-07-04T20:52:50.000Z | pandas/tests/frame/test_reductions.py | k-fillmore/pandas | 67d4cae17bec45e84b9cf51bcf4fb5bbe293b26f | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 9 | 2015-04-06T06:35:47.000Z | 2019-07-10T23:53:29.000Z | from datetime import timedelta
from decimal import Decimal
from dateutil.tz import tzlocal
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
def assert_stat_op_calc(
    opname,
    alternative,
    frame,
    has_skipna=True,
    check_dtype=True,
    check_dates=False,
    rtol=1e-5,
    atol=1e-8,
    skipna_alternative=None,
):
    """
    Check that operator opname works as advertised on frame

    Parameters
    ----------
    opname : string
        Name of the operator to test on frame
    alternative : function
        Function that opname is tested against; i.e. "frame.opname()" should
        equal "alternative(frame)".
    frame : DataFrame
        The object that the tests are executed on
    has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skip_na"
    check_dtype : bool, default True
        Whether the dtypes of the result of "frame.opname()" and
        "alternative(frame)" should be checked.
    check_dates : bool, default False
        Whether opname should be tested on a Datetime Series
    rtol : float, default 1e-5
        Relative tolerance.
    atol : float, default 1e-8
        Absolute tolerance.
    skipna_alternative : function, default None
        NaN-safe version of alternative
    """
    f = getattr(frame, opname)
    if check_dates:
        # mean/median emit a FutureWarning when applied to datetime columns;
        # the other reductions here do not.
        expected_warning = FutureWarning if opname in ["mean", "median"] else None
        df = DataFrame({"b": date_range("1/1/2001", periods=2)})
        with tm.assert_produces_warning(expected_warning):
            result = getattr(df, opname)()
        assert isinstance(result, Series)
        # repeat with a numeric column added alongside the datetimes
        df["a"] = range(len(df))
        with tm.assert_produces_warning(expected_warning):
            result = getattr(df, opname)()
        assert isinstance(result, Series)
        assert len(result)
    if has_skipna:
        def wrapper(x):
            # non-skipping reference: apply `alternative` to the raw values,
            # letting NaNs propagate
            return alternative(x.values)
        skipna_wrapper = tm._make_skipna_wrapper(alternative, skipna_alternative)
        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(
            result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol
        )
        # HACK: win32
        tm.assert_series_equal(
            result1,
            frame.apply(wrapper, axis=1),
            check_dtype=False,
            rtol=rtol,
            atol=atol,
        )
    else:
        skipna_wrapper = alternative
    # default (skipna) path, compared against the NaN-safe reference
    result0 = f(axis=0)
    result1 = f(axis=1)
    tm.assert_series_equal(
        result0,
        frame.apply(skipna_wrapper),
        check_dtype=check_dtype,
        rtol=rtol,
        atol=atol,
    )
    if opname in ["sum", "prod"]:
        expected = frame.apply(skipna_wrapper, axis=1)
        tm.assert_series_equal(
            result1, expected, check_dtype=False, rtol=rtol, atol=atol
        )
    # check dtypes
    if check_dtype:
        lcd_dtype = frame.values.dtype
        assert lcd_dtype == result0.dtype
        assert lcd_dtype == result1.dtype
    # bad axis
    with pytest.raises(ValueError, match="No axis named 2"):
        f(axis=2)
    # all NA case
    if has_skipna:
        all_na = frame * np.NaN
        r0 = getattr(all_na, opname)(axis=0)
        r1 = getattr(all_na, opname)(axis=1)
        if opname in ["sum", "prod"]:
            unit = 1 if opname == "prod" else 0  # result for empty sum/prod
            expected = Series(unit, index=r0.index, dtype=r0.dtype)
            tm.assert_series_equal(r0, expected)
            expected = Series(unit, index=r1.index, dtype=r1.dtype)
            tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only=False):
    """
    Check that the API for operator *opname* works as advertised on frame.

    Parameters
    ----------
    opname : string
        Name of the operator to test on frame
    float_frame : DataFrame
        DataFrame with columns of type float
    float_string_frame : DataFrame
        DataFrame with both float and string columns
    has_numeric_only : bool, default False
        Whether the method *opname* has the kwarg ``numeric_only``
    """
    mixed_op = getattr(float_string_frame, opname)
    # the op must at least run on a mixed-dtype frame along both axes
    for axis in (0, 1):
        mixed_op(axis=axis)
    if has_numeric_only:
        for axis in (0, 1):
            mixed_op(axis=axis, numeric_only=True)
        float_op = getattr(float_frame, opname)
        for axis in (0, 1):
            float_op(axis=axis, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
    """
    Check that bool operator opname works as advertised on frame

    Parameters
    ----------
    opname : string
        Name of the operator to test on frame
    alternative : function
        Function that opname is tested against; i.e. "frame.opname()" should
        equal "alternative(frame)".
    frame : DataFrame
        The object that the tests are executed on
    has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skip_na"
    """
    f = getattr(frame, opname)
    if has_skipna:
        def skipna_wrapper(x):
            # NaN-safe reference: drop NAs before applying `alternative`
            nona = x.dropna().values
            return alternative(nona)
        def wrapper(x):
            # non-skipping reference over the raw values
            return alternative(x.values)
        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(result0, frame.apply(wrapper))
        tm.assert_series_equal(
            result1, frame.apply(wrapper, axis=1), check_dtype=False
        )  # HACK: win32
    else:
        skipna_wrapper = alternative
        wrapper = alternative
    # default (skipna) path, compared against the NaN-safe reference
    result0 = f(axis=0)
    result1 = f(axis=1)
    tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
    tm.assert_series_equal(
        result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False
    )
    # bad axis
    with pytest.raises(ValueError, match="No axis named 2"):
        f(axis=2)
    # all NA case: with skipna, any() over no valid values is False and
    # all() over no valid values is True
    if has_skipna:
        all_na = frame * np.NaN
        r0 = getattr(all_na, opname)(axis=0)
        r1 = getattr(all_na, opname)(axis=1)
        if opname == "any":
            assert not r0.any()
            assert not r1.any()
        else:
            assert r0.all()
            assert r1.all()
def assert_bool_op_api(
    opname, bool_frame_with_na, float_string_frame, has_bool_only=False
):
    """
    Check that API for boolean operator opname works as advertised on frame

    Parameters
    ----------
    opname : string
        Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        Boolean-valued DataFrame fixture (per its name, may contain NA values)
    float_string_frame : DataFrame
        DataFrame with both float and string columns
    has_bool_only : bool, default False
        Whether the method "opname" has the kwarg "bool_only"
    """
    # make sure op works on mixed-type frame
    # NOTE(review): this mutates the passed-in frame by adding a "_bool_" column
    mixed = float_string_frame
    mixed["_bool_"] = np.random.randn(len(mixed)) > 0.5
    getattr(mixed, opname)(axis=0)
    getattr(mixed, opname)(axis=1)
    if has_bool_only:
        getattr(mixed, opname)(axis=0, bool_only=True)
        getattr(mixed, opname)(axis=1, bool_only=True)
        getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
        getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics:
# ---------------------------------------------------------------------
# Reductions
    def test_stat_op_api(self, float_frame, float_string_frame):
        """Smoke-test the reduction API surface via assert_stat_op_api."""
        assert_stat_op_api(
            "count", float_frame, float_string_frame, has_numeric_only=True
        )
        assert_stat_op_api(
            "sum", float_frame, float_string_frame, has_numeric_only=True
        )
        assert_stat_op_api("nunique", float_frame, float_string_frame)
        assert_stat_op_api("mean", float_frame, float_string_frame)
        assert_stat_op_api("product", float_frame, float_string_frame)
        assert_stat_op_api("median", float_frame, float_string_frame)
        assert_stat_op_api("min", float_frame, float_string_frame)
        assert_stat_op_api("max", float_frame, float_string_frame)
        assert_stat_op_api("mad", float_frame, float_string_frame)
        assert_stat_op_api("var", float_frame, float_string_frame)
        assert_stat_op_api("std", float_frame, float_string_frame)
        assert_stat_op_api("sem", float_frame, float_string_frame)
        # NOTE(review): "median" is checked a second time here -- likely a
        # duplicate of the earlier call rather than an intentional repeat.
        assert_stat_op_api("median", float_frame, float_string_frame)
        # skew/kurt require scipy; silently skip when it is not installed
        try:
            from scipy.stats import kurtosis, skew  # noqa:F401
            assert_stat_op_api("skew", float_frame, float_string_frame)
            assert_stat_op_api("kurt", float_frame, float_string_frame)
        except ImportError:
            pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc(
"nunique",
nunique,
float_frame_with_na,
has_skipna=False,
check_dtype=False,
check_dates=True,
)
# GH#32571 check_less_precise is needed on apparently-random
# py37-npdev builds and OSX-PY36-min_version builds
# mixed types (with upcasting happening)
assert_stat_op_calc(
"sum",
np.sum,
mixed_float_frame.astype("float32"),
check_dtype=False,
rtol=1e-3,
)
assert_stat_op_calc(
"sum", np.sum, float_frame_with_na, skipna_alternative=np.nansum
)
assert_stat_op_calc("mean", np.mean, float_frame_with_na, check_dates=True)
assert_stat_op_calc(
"product", np.prod, float_frame_with_na, skipna_alternative=np.nanprod
)
assert_stat_op_calc("mad", mad, float_frame_with_na)
assert_stat_op_calc("var", var, float_frame_with_na)
assert_stat_op_calc("std", std, float_frame_with_na)
assert_stat_op_calc("sem", sem, float_frame_with_na)
assert_stat_op_calc(
"count",
count,
float_frame_with_na,
has_skipna=False,
check_dtype=False,
check_dates=True,
)
try:
from scipy import kurtosis, skew # noqa:F401
assert_stat_op_calc("skew", skewness, float_frame_with_na)
assert_stat_op_calc("kurt", kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc("median", wrapper, float_frame_with_na, check_dates=True)
assert_stat_op_calc(
"median", wrapper, int_frame, check_dtype=False, check_dates=True
)
@pytest.mark.parametrize(
"method", ["sum", "mean", "prod", "var", "std", "skew", "min", "max"]
)
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
"a": [
-0.00049987540199591344,
-0.0016467257772919831,
0.00067695870775883013,
],
"b": [-0, -0, 0.0],
"c": [
0.00031111847529610595,
0.0014902627951905339,
-0.00094099200035979691,
],
}
df1 = DataFrame(data, index=["foo", "bar", "baz"], dtype="O")
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype("f8"), method)(1)
if method in ["sum", "prod"]:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("op", ["mean", "std", "var", "skew", "kurt", "sem"])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame(
{
"int": [1, 2, 3, 4],
"float": [1.0, 2.0, 3.0, 4.0],
"str": ["a", "b", "c", "d"],
}
)
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context("use_bottleneck", False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame(
{
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
"string_data": ["a", "b", "c", "d", "e"],
}
)
df.reindex(columns=["bool_data", "int_data", "string_data"])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(
test.values, np.array([2, 150, "abcde"], dtype=object)
)
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({"A": [1, 1, 1], "B": [1, 2, 3], "C": [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({"A": 1, "B": 3, "C": 2}))
tm.assert_series_equal(
df.nunique(dropna=False), Series({"A": 1, "B": 3, "C": 3})
)
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(
df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2})
)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = DataFrame({"A": [1, 1], "B": [Timestamp("2000", tz=tz)] * 2})
with tm.assert_produces_warning(FutureWarning):
result = df.mean()
expected = Series([1.0], index=["A"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = DataFrame({"A": [Timestamp("2000", tz=tz)] * 2})
with tm.assert_produces_warning(FutureWarning):
result = df.mean()
expected = Series(dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_mean_mixed_string_decimal(self):
# GH 11670
# possible bug when calculating mean of DataFrame?
d = [
{"A": 2, "B": None, "C": Decimal("628.00")},
{"A": 1, "B": None, "C": Decimal("383.00")},
{"A": 3, "B": None, "C": Decimal("651.00")},
{"A": 2, "B": None, "C": Decimal("575.00")},
{"A": 4, "B": None, "C": Decimal("1114.00")},
{"A": 1, "B": "TEST", "C": Decimal("241.00")},
{"A": 2, "B": None, "C": Decimal("572.00")},
{"A": 4, "B": None, "C": Decimal("609.00")},
{"A": 3, "B": None, "C": Decimal("820.00")},
{"A": 5, "B": None, "C": Decimal("1223.00")},
]
df = DataFrame(d)
result = df.mean()
expected = Series([2.7, 681.6], index=["A", "C"])
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context("use_bottleneck", False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize("meth", ["sem", "var", "std"])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=["foo", "bar", "baz"])
# set one entry to a number in str format
df1.loc[0, "foo"] = "100"
df2 = DataFrame(np.random.randn(5, 3), columns=["foo", "bar", "baz"])
# set one entry to a non-number str
df2.loc[0, "foo"] = "a"
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[["bar", "baz"]], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[["bar", "baz"]], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context("use_bottleneck", False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
)
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs("bar")
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == "bar"
@pytest.mark.parametrize(
"dropna, expected",
[
(
True,
{
"A": [12],
"B": [10.0],
"C": [1.0],
"D": ["a"],
"E": Categorical(["a"], categories=["a"]),
"F": to_datetime(["2000-1-2"]),
"G": to_timedelta(["1 days"]),
},
),
(
False,
{
"A": [12],
"B": [10.0],
"C": [np.nan],
"D": np.array([np.nan], dtype=object),
"E": Categorical([np.nan], categories=["a"]),
"F": [pd.NaT],
"G": to_timedelta([pd.NaT]),
},
),
(
True,
{
"H": [8, 9, np.nan, np.nan],
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]),
"L": to_datetime(["2000-1-2", "NaT", "NaT", "NaT"]),
"M": to_timedelta(["1 days", "nan", "nan", "nan"]),
"N": [0, 1, 2, 3],
},
),
(
False,
{
"H": [8, 9, np.nan, np.nan],
"I": [8, 9, np.nan, np.nan],
"J": [1, np.nan, np.nan, np.nan],
"K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]),
"L": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]),
"M": to_timedelta(["nan", "1 days", "nan", "nan"]),
"N": [0, 1, 2, 3],
},
),
],
)
def test_mode_dropna(self, dropna, expected):
df = DataFrame(
{
"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, "a", np.nan],
"E": Categorical([np.nan, np.nan, "a", np.nan]),
"F": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]),
"G": to_timedelta(["1 days", "nan", "nan", "nan"]),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(["a", np.nan, "a", np.nan]),
"L": to_datetime(["2000-1-2", "2000-1-2", "NaT", "NaT"]),
"M": to_timedelta(["1 days", "nan", "1 days", "nan"]),
"N": np.arange(4, dtype="int64"),
}
)
result = df[sorted(expected.keys())].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, "a", "a"]})
expected = DataFrame({"A": ["a", np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by="A").reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_mode_empty_df(self):
df = DataFrame([], columns=["a", "b"])
result = df.mode()
expected = DataFrame([], columns=["a", "b"], index=Index([], dtype=int))
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(
{
"A": date_range("2012-1-1", periods=3, freq="D"),
"B": date_range("2012-1-2", periods=3, freq="D"),
"C": Timestamp("20120101") - timedelta(minutes=5, seconds=5),
}
)
diffs = DataFrame({"A": df["A"] - df["C"], "B": df["A"] - df["B"]})
# min
result = diffs.min()
assert result[0] == diffs.loc[0, "A"]
assert result[1] == diffs.loc[0, "B"]
result = diffs.min(axis=1)
assert (result == diffs.loc[0, "B"]).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, "A"]
assert result[1] == diffs.loc[2, "B"]
result = diffs.max(axis=1)
assert (result == diffs["A"]).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame({"A": df["A"] - df["C"], "B": df["B"] - df["A"]})
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed["C"] = "foo"
mixed["D"] = 1
mixed["E"] = 1.0
mixed["F"] = Timestamp("20130101")
# results in an object array
result = mixed.min()
expected = Series(
[
pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
"foo",
1,
1.0,
Timestamp("20130101"),
],
index=mixed.columns,
)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.0], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[["A", "B"]].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[["A", "B"]].min()
expected = Series(
[timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=["A", "B"]
)
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame(
{
"time": date_range("20130102", periods=5),
"time2": date_range("20130105", periods=5),
}
)
df["off1"] = df["time2"] - df["time"]
assert df["off1"].dtype == "timedelta64[ns]"
df["off2"] = df["time"] - df["time2"]
df._consolidate_inplace()
assert df["off1"].dtype == "timedelta64[ns]"
assert df["off2"].dtype == "timedelta64[ns]"
def test_std_timedelta64_skipna_false(self):
# GH#37392
tdi = pd.timedelta_range("1 Day", periods=10)
df = DataFrame({"A": tdi, "B": tdi})
df.iloc[-2, -1] = pd.NaT
result = df.std(skipna=False)
expected = Series(
[df["A"].std(), pd.NaT], index=["A", "B"], dtype="timedelta64[ns]"
)
tm.assert_series_equal(result, expected)
result = df.std(axis=1, skipna=False)
expected = Series([pd.Timedelta(0)] * 8 + [pd.NaT, pd.Timedelta(0)])
tm.assert_series_equal(result, expected)
    def test_sum_corner(self):
        """Summing an empty DataFrame returns an empty Series along both axes."""
        empty_frame = DataFrame()
        axis0 = empty_frame.sum(0)
        axis1 = empty_frame.sum(1)
        assert isinstance(axis0, Series)
        assert isinstance(axis1, Series)
        assert len(axis0) == 0
        assert len(axis1) == 0
@pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)])
def test_sum_prod_nanops(self, method, unit):
idx = ["a", "b", "c"]
df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]})
# The default
result = getattr(df, method)
expected = Series([unit, unit, unit], index=idx, dtype="float64")
# min_count=1
result = getattr(df, method)(min_count=1)
expected = Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = Series([unit, unit, unit], index=idx, dtype="float64")
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = Series(result, index=["A", "B"])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = Series(result, index=["A", "B"])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ["a", "b", "c"]
df = DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = Series([0, 0, 0], dtype="m8[ns]", index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = Series([0, 0, np.nan], dtype="m8[ns]", index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_sum_mixed_datetime(self):
# GH#30886
df = DataFrame(
{"A": pd.date_range("2000", periods=4), "B": [1, 2, 3, 4]}
).reindex([2, 3, 4])
result = df.sum()
expected = Series({"B": 7.0})
tm.assert_series_equal(result, expected)
def test_mean_corner(self, float_frame, float_string_frame):
    """mean() on mixed frames drops non-numeric columns; bool columns average as 0/1."""
    # unit test when have object data
    the_mean = float_string_frame.mean(axis=0)
    the_sum = float_string_frame.sum(axis=0, numeric_only=True)
    tm.assert_index_equal(the_sum.index, the_mean.index)
    # at least one (object) column was excluded
    assert len(the_mean.index) < len(float_string_frame.columns)

    # xs sum mixed type, just want to know it works...
    the_mean = float_string_frame.mean(axis=1)
    the_sum = float_string_frame.sum(axis=1, numeric_only=True)
    tm.assert_index_equal(the_sum.index, the_mean.index)

    # take mean of boolean column
    float_frame["bool"] = float_frame["A"] > 0
    means = float_frame.mean(0)
    assert means["bool"] == float_frame["bool"].values.mean()

def test_mean_datetimelike(self):
    """Datetime-like columns are excluded from mean() by default (FutureWarning)."""
    # GH#24757 check that datetimelike are excluded by default, handled
    # correctly with numeric_only=True
    df = DataFrame(
        {
            "A": np.arange(3),
            "B": pd.date_range("2016-01-01", periods=3),
            "C": pd.timedelta_range("1D", periods=3),
            "D": pd.period_range("2016", periods=3, freq="A"),
        }
    )
    result = df.mean(numeric_only=True)
    expected = Series({"A": 1.0})
    tm.assert_series_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning):
        # in the future datetime columns will be included
        result = df.mean()
    expected = Series({"A": 1.0, "C": df.loc[1, "C"]})
    tm.assert_series_equal(result, expected)

def test_mean_datetimelike_numeric_only_false(self):
    """With numeric_only=False, datetime and timedelta means work; Period raises."""
    df = DataFrame(
        {
            "A": np.arange(3),
            "B": pd.date_range("2016-01-01", periods=3),
            "C": pd.timedelta_range("1D", periods=3),
        }
    )

    # datetime(tz) and timedelta work
    result = df.mean(numeric_only=False)
    expected = Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]})
    tm.assert_series_equal(result, expected)

    # mean of period is not allowed
    df["D"] = pd.period_range("2016", periods=3, freq="A")

    with pytest.raises(TypeError, match="mean is not implemented for Period"):
        df.mean(numeric_only=False)

def test_mean_extensionarray_numeric_only_true(self):
    """mean(numeric_only=True) includes nullable Int64 columns."""
    # https://github.com/pandas-dev/pandas/issues/33256
    arr = np.random.randint(1000, size=(10, 5))
    df = DataFrame(arr, dtype="Int64")
    result = df.mean(numeric_only=True)
    expected = DataFrame(arr).mean()
    tm.assert_series_equal(result, expected)

def test_stats_mixed_type(self, float_string_frame):
    """Smoke test: row-wise stats on a mixed-dtype frame must not blow up."""
    # don't blow up
    float_string_frame.std(1)
    float_string_frame.var(1)
    float_string_frame.mean(1)
    float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=range(1), columns=range(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ----------------------------------------------------------------------
# Index of max / min

def test_idxmin(self, float_frame, int_frame):
    """idxmin matches Series.idxmin applied column/row-wise for every axis/skipna combo."""
    frame = float_frame
    # inject missing data so skipna actually matters
    frame.iloc[5:10] = np.nan
    frame.iloc[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, int_frame]:
                result = df.idxmin(axis=axis, skipna=skipna)
                expected = df.apply(Series.idxmin, axis=axis, skipna=skipna)
                tm.assert_series_equal(result, expected)

    # an out-of-range axis must raise
    msg = "No axis named 2 for object type DataFrame"
    with pytest.raises(ValueError, match=msg):
        frame.idxmin(axis=2)

def test_idxmax(self, float_frame, int_frame):
    """idxmax matches Series.idxmax applied column/row-wise for every axis/skipna combo."""
    frame = float_frame
    # inject missing data so skipna actually matters
    frame.iloc[5:10] = np.nan
    frame.iloc[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, int_frame]:
                result = df.idxmax(axis=axis, skipna=skipna)
                expected = df.apply(Series.idxmax, axis=axis, skipna=skipna)
                tm.assert_series_equal(result, expected)

    # an out-of-range axis must raise
    msg = "No axis named 2 for object type DataFrame"
    with pytest.raises(ValueError, match=msg):
        frame.idxmax(axis=2)
def test_idxmax_mixed_dtype(self):
# don't cast to object, which would raise in nanops
dti = pd.date_range("2016-01-01", periods=3)
df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti})
result = df.idxmax()
expected = Series([1, 0, 2], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
result = df.idxmin()
expected = Series([0, 2, 0], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
# ----------------------------------------------------------------------
# Logical reductions

@pytest.mark.parametrize("opname", ["any", "all"])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
    """any()/all() agree with the numpy reference and accept the standard keywords."""
    assert_bool_op_calc(
        opname, getattr(np, opname), bool_frame_with_na, has_skipna=True
    )
    assert_bool_op_api(
        opname, bool_frame_with_na, float_string_frame, has_bool_only=True
    )
def test_any_all_extra(self):
    """any()/all() along rows, with bool_only and with axis=None scalar reduction."""
    df = DataFrame(
        {
            "A": [True, False, False],
            "B": [True, True, False],
            "C": [True, True, True],
        },
        index=["a", "b", "c"],
    )
    result = df[["A", "B"]].any(1)
    expected = Series([True, True, False], index=["a", "b", "c"])
    tm.assert_series_equal(result, expected)

    # bool_only is a no-op here: all columns are already boolean
    result = df[["A", "B"]].any(1, bool_only=True)
    tm.assert_series_equal(result, expected)

    result = df.all(1)
    expected = Series([True, False, False], index=["a", "b", "c"])
    tm.assert_series_equal(result, expected)

    result = df.all(1, bool_only=True)
    tm.assert_series_equal(result, expected)

    # Axis is None: reduce over the whole frame to a scalar
    result = df.all(axis=None).item()
    assert result is False

    result = df.any(axis=None).item()
    assert result is True

    result = df[["C"]].all(axis=None).item()
    assert result is True

def test_any_datetime(self):
    """Row-wise any() treats NaT like NaN: a row of all-missing values is False."""
    # GH 23070
    float_data = [1, np.nan, 3, np.nan]
    datetime_data = [
        Timestamp("1960-02-15"),
        Timestamp("1960-02-16"),
        pd.NaT,
        pd.NaT,
    ]
    df = DataFrame({"A": float_data, "B": datetime_data})

    result = df.any(1)
    expected = Series([True, True, True, False])
    tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
    """all(bool_only=True) keeps only genuinely boolean columns."""
    # GH 25101
    df = DataFrame(
        {"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None]}
    )
    # no boolean columns at all -> empty result
    result = df.all(bool_only=True)
    expected = Series(dtype=np.bool_)
    tm.assert_series_equal(result, expected)

    df = DataFrame(
        {
            "col1": [1, 2, 3],
            "col2": [4, 5, 6],
            "col3": [None, None, None],
            "col4": [False, False, True],
        }
    )
    result = df.all(bool_only=True)
    expected = Series({"col4": False})
    tm.assert_series_equal(result, expected)

@pytest.mark.parametrize(
    "func, data, expected",
    [
        (np.any, {}, False),
        (np.all, {}, True),
        (np.any, {"A": []}, False),
        (np.all, {"A": []}, True),
        (np.any, {"A": [False, False]}, False),
        (np.all, {"A": [False, False]}, False),
        (np.any, {"A": [True, False]}, True),
        (np.all, {"A": [True, False]}, False),
        (np.any, {"A": [True, True]}, True),
        (np.all, {"A": [True, True]}, True),
        (np.any, {"A": [False], "B": [False]}, False),
        (np.all, {"A": [False], "B": [False]}, False),
        (np.any, {"A": [False, False], "B": [False, True]}, True),
        (np.all, {"A": [False, False], "B": [False, True]}, False),
        # other types
        (np.all, {"A": Series([0.0, 1.0], dtype="float")}, False),
        (np.any, {"A": Series([0.0, 1.0], dtype="float")}, True),
        (np.all, {"A": Series([0, 1], dtype=int)}, False),
        (np.any, {"A": Series([0, 1], dtype=int)}, True),
        pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns]")}, False),
        pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, False),
        pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns]")}, True),
        pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, True),
        pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns]")}, True),
        pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True),
        pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns]")}, True),
        pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True),
        pytest.param(np.all, {"A": Series([0, 1], dtype="m8[ns]")}, False),
        pytest.param(np.any, {"A": Series([0, 1], dtype="m8[ns]")}, True),
        pytest.param(np.all, {"A": Series([1, 2], dtype="m8[ns]")}, True),
        pytest.param(np.any, {"A": Series([1, 2], dtype="m8[ns]")}, True),
        # np.all on Categorical raises, so the reduction drops the
        # column, so all is being done on an empty Series, so is True
        (np.all, {"A": Series([0, 1], dtype="category")}, True),
        (np.any, {"A": Series([0, 1], dtype="category")}, False),
        (np.all, {"A": Series([1, 2], dtype="category")}, True),
        (np.any, {"A": Series([1, 2], dtype="category")}, False),
        # Mix GH#21484
        pytest.param(
            np.all,
            {
                "A": Series([10, 20], dtype="M8[ns]"),
                "B": Series([10, 20], dtype="m8[ns]"),
            },
            True,
        ),
    ],
)
def test_any_all_np_func(self, func, data, expected):
    """np.any/np.all dispatch to DataFrame.any/all and return a numpy scalar."""
    # GH 19976
    data = DataFrame(data)
    result = func(data)
    assert isinstance(result, np.bool_)
    assert result.item() is expected

    # method version
    result = getattr(DataFrame(data), func.__name__)(axis=None)
    assert isinstance(result, np.bool_)
    assert result.item() is expected
def test_any_all_object(self):
# GH 19976
result = np.all(DataFrame(columns=["a", "b"])).item()
assert result is True
result = np.any(DataFrame(columns=["a", "b"])).item()
assert result is False
def test_any_all_object_bool_only(self):
    """bool_only=True selects only bool-valued object columns (via _get_bool_data)."""
    df = DataFrame({"A": ["foo", 2], "B": [True, False]}).astype(object)
    df._consolidate_inplace()
    df["C"] = Series([True, True])

    # The underlying bug is in DataFrame._get_bool_data, so we check
    # that while we're here
    res = df._get_bool_data()
    expected = df[["B", "C"]]
    tm.assert_frame_equal(res, expected)

    res = df.all(bool_only=True, axis=0)
    expected = Series([False, True], index=["B", "C"])
    tm.assert_series_equal(res, expected)

    # operating on a subset of columns should not produce a _larger_ Series
    res = df[["B", "C"]].all(bool_only=True, axis=0)
    tm.assert_series_equal(res, expected)

    assert not df.all(bool_only=True, axis=None)

    res = df.any(bool_only=True, axis=0)
    expected = Series([True, True], index=["B", "C"])
    tm.assert_series_equal(res, expected)

    # operating on a subset of columns should not produce a _larger_ Series
    res = df[["B", "C"]].any(bool_only=True, axis=0)
    tm.assert_series_equal(res, expected)

    assert df.any(bool_only=True, axis=None)

@pytest.mark.parametrize("method", ["any", "all"])
def test_any_all_level_axis_none_raises(self, method):
    """Aggregating by level with axis=None is ambiguous and must raise."""
    df = DataFrame(
        {"A": 1},
        index=MultiIndex.from_product(
            [["A", "B"], ["a", "b"]], names=["out", "in"]
        ),
    )
    xpr = "Must specify 'axis' when aggregating by level."
    with pytest.raises(ValueError, match=xpr):
        getattr(df, method)(axis=None, level="out")

# ---------------------------------------------------------------------
# Unsorted

def test_series_broadcasting(self):
    """Smoke test: broadcasting ops with NaN must not emit numpy warnings."""
    # GH 16378, GH 16306
    df = DataFrame([1.0, 1.0, 1.0])
    df_nan = DataFrame({"A": [np.nan, 2.0, np.nan]})
    s = Series([1, 1, 1])
    s_nan = Series([np.nan, np.nan, 1])

    with tm.assert_produces_warning(None):
        df_nan.clip(lower=s, axis=0)
        for op in ["lt", "le", "gt", "ge", "eq", "ne"]:
            getattr(df, op)(s_nan, axis=0)
class TestDataFrameReductions:
    """min/max reductions on datetime-like frames: NaT handling, empty frames, tz."""

    def test_min_max_dt64_with_NaT(self):
        # Both NaT and Timestamp are in DataFrame.
        df = DataFrame({"foo": [pd.NaT, pd.NaT, Timestamp("2012-05-01")]})

        res = df.min()
        exp = Series([Timestamp("2012-05-01")], index=["foo"])
        tm.assert_series_equal(res, exp)

        res = df.max()
        exp = Series([Timestamp("2012-05-01")], index=["foo"])
        tm.assert_series_equal(res, exp)

        # GH12941, only NaTs are in DataFrame.
        df = DataFrame({"foo": [pd.NaT, pd.NaT]})

        res = df.min()
        exp = Series([pd.NaT], index=["foo"])
        tm.assert_series_equal(res, exp)

        res = df.max()
        exp = Series([pd.NaT], index=["foo"])
        tm.assert_series_equal(res, exp)

    def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture):
        """With skipna=False a NaT must propagate into row-wise min/max (GH#36907)."""
        tz = tz_naive_fixture
        if isinstance(tz, tzlocal) and is_platform_windows():
            request.node.add_marker(
                pytest.mark.xfail(
                    reason="GH#37659 OSError raised within tzlocal bc Windows "
                    "chokes in times before 1970-01-01"
                )
            )

        df = DataFrame(
            {
                "a": [
                    Timestamp("2020-01-01 08:00:00", tz=tz),
                    Timestamp("1920-02-01 09:00:00", tz=tz),
                ],
                "b": [Timestamp("2020-02-01 08:00:00", tz=tz), pd.NaT],
            }
        )

        res = df.min(axis=1, skipna=False)
        expected = Series([df.loc[0, "a"], pd.NaT])
        # result must keep the tz-aware dtype, not fall back to object
        assert expected.dtype == df["a"].dtype
        tm.assert_series_equal(res, expected)

        res = df.max(axis=1, skipna=False)
        expected = Series([df.loc[0, "b"], pd.NaT])
        assert expected.dtype == df["a"].dtype
        tm.assert_series_equal(res, expected)

    def test_min_max_dt64_api_consistency_with_NaT(self):
        # Calling the following sum functions returned an error for dataframes but
        # returned NaT for series. These tests check that the API is consistent in
        # min/max calls on empty Series/DataFrames. See GH:33704 for more
        # information
        df = DataFrame({"x": pd.to_datetime([])})
        expected_dt_series = Series(pd.to_datetime([]))

        # check axis 0
        assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT)
        assert (df.max(axis=0).x is pd.NaT) == (expected_dt_series.max() is pd.NaT)

        # check axis 1
        tm.assert_series_equal(df.min(axis=1), expected_dt_series)
        tm.assert_series_equal(df.max(axis=1), expected_dt_series)

    def test_min_max_dt64_api_consistency_empty_df(self):
        # check DataFrame/Series api consistency when calling min/max on an empty
        # DataFrame/Series.
        df = DataFrame({"x": []})
        expected_float_series = Series([], dtype=float)

        # check axis 0
        assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min())
        assert np.isnan(df.max(axis=0).x) == np.isnan(expected_float_series.max())

        # check axis 1
        tm.assert_series_equal(df.min(axis=1), expected_float_series)
        # BUG FIX: this line previously repeated df.min; it must exercise df.max
        tm.assert_series_equal(df.max(axis=1), expected_float_series)

    @pytest.mark.parametrize(
        "initial",
        ["2018-10-08 13:36:45+00:00", "2018-10-08 13:36:45+03:00"],  # Non-UTC timezone
    )
    @pytest.mark.parametrize("method", ["min", "max"])
    def test_preserve_timezone(self, initial: str, method):
        """Row-wise min/max must preserve the timezone of the inputs (GH 28552)."""
        initial_dt = pd.to_datetime(initial)
        expected = Series([initial_dt])
        df = DataFrame([expected])
        result = getattr(df, method)(axis=1)
        tm.assert_series_equal(result, expected)

    def test_frame_any_all_with_level(self):
        """any(level=...)/all(level=...) reduce within each outer index level."""
        df = DataFrame(
            {"data": [False, False, True, False, True, False, True]},
            index=[
                ["one", "one", "two", "one", "two", "two", "two"],
                [0, 1, 0, 2, 1, 2, 3],
            ],
        )

        result = df.any(level=0)
        ex = DataFrame({"data": [False, True]}, index=["one", "two"])
        tm.assert_frame_equal(result, ex)

        result = df.all(level=0)
        ex = DataFrame({"data": [False, False]}, index=["one", "two"])
        tm.assert_frame_equal(result, ex)

    def test_frame_any_with_timedelta(self):
        """any() treats a zero timedelta as falsy and a nonzero one as truthy (GH#17667)."""
        df = DataFrame(
            {
                "a": Series([0, 0]),
                "t": Series([pd.to_timedelta(0, "s"), pd.to_timedelta(1, "ms")]),
            }
        )

        result = df.any(axis=0)
        expected = Series(data=[False, True], index=["a", "t"])
        tm.assert_series_equal(result, expected)

        result = df.any(axis=1)
        expected = Series(data=[False, True])
        tm.assert_series_equal(result, expected)
class TestNuisanceColumns:
    """Reductions should silently drop columns whose dtype cannot support the op,
    matching the corresponding Series behavior (which raises)."""

    @pytest.mark.parametrize("method", ["any", "all"])
    def test_any_all_categorical_dtype_nuisance_column(self, method):
        # GH#36076 DataFrame should match Series behavior
        ser = Series([0, 1], dtype="category", name="A")
        df = ser.to_frame()

        # Double-check the Series behavior is to raise
        with pytest.raises(TypeError, match="does not implement reduction"):
            getattr(ser, method)()

        with pytest.raises(TypeError, match="does not implement reduction"):
            getattr(np, method)(ser)

        with pytest.raises(TypeError, match="does not implement reduction"):
            getattr(df, method)(bool_only=False)

        # With bool_only=None, operating on this column raises and is ignored,
        # so we expect an empty result.
        result = getattr(df, method)(bool_only=None)
        expected = Series([], index=Index([]), dtype=bool)
        tm.assert_series_equal(result, expected)

        result = getattr(np, method)(df, axis=0)
        tm.assert_series_equal(result, expected)

    def test_median_categorical_dtype_nuisance_column(self):
        # GH#21020 DataFrame.median should match Series.median
        df = DataFrame({"A": Categorical([1, 2, 2, 2, 3])})
        ser = df["A"]

        # Double-check the Series behavior is to raise
        with pytest.raises(TypeError, match="does not implement reduction"):
            ser.median()

        with pytest.raises(TypeError, match="does not implement reduction"):
            df.median(numeric_only=False)

        # default call drops the categorical column entirely
        result = df.median()
        expected = Series([], index=Index([]), dtype=np.float64)
        tm.assert_series_equal(result, expected)

        # same thing, but with an additional non-categorical column
        df["B"] = df["A"].astype(int)

        with pytest.raises(TypeError, match="does not implement reduction"):
            df.median(numeric_only=False)

        result = df.median()
        expected = Series([2.0], index=["B"])
        tm.assert_series_equal(result, expected)

        # TODO: np.median(df, axis=0) gives np.array([2.0, 2.0]) instead
        # of expected.values

    @pytest.mark.parametrize("method", ["min", "max"])
    def test_min_max_categorical_dtype_non_ordered_nuisance_column(self, method):
        # GH#28949 DataFrame.min should behave like Series.min
        cat = Categorical(["a", "b", "c", "b"], ordered=False)
        ser = Series(cat)
        df = ser.to_frame("A")

        # Double-check the Series behavior
        with pytest.raises(TypeError, match="is not ordered for operation"):
            getattr(ser, method)()

        with pytest.raises(TypeError, match="is not ordered for operation"):
            getattr(np, method)(ser)

        with pytest.raises(TypeError, match="is not ordered for operation"):
            getattr(df, method)(numeric_only=False)

        # default call drops the unordered categorical
        result = getattr(df, method)()
        expected = Series([], index=Index([]), dtype=np.float64)
        tm.assert_series_equal(result, expected)

        result = getattr(np, method)(df)
        tm.assert_series_equal(result, expected)

        # same thing, but with an additional non-categorical column
        df["B"] = df["A"].astype(object)
        result = getattr(df, method)()
        if method == "min":
            expected = Series(["a"], index=["B"])
        else:
            expected = Series(["c"], index=["B"])
        tm.assert_series_equal(result, expected)

        result = getattr(np, method)(df)
        tm.assert_series_equal(result, expected)

    def test_reduction_object_block_splits_nuisance_columns(self):
        # GH#37827
        df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", "c"]}, dtype=object)

        # We should only exclude "B", not "A"
        result = df.mean()
        expected = Series([1.0], index=["A"])
        tm.assert_series_equal(result, expected)

        # Same behavior but heterogeneous dtype
        df["C"] = df["A"].astype(int) + 4

        result = df.mean()
        expected = Series([1.0, 5.0], index=["A", "C"])
        tm.assert_series_equal(result, expected)
def test_sum_timedelta64_skipna_false():
    """With skipna=False a NaT propagates into timedelta64 sums (GH#17235)."""
    # 4x2 grid of second-resolution timedeltas 0..7, with the last cell NaT
    arr = np.arange(8).astype(np.int64).view("m8[s]").reshape(4, 2)
    arr[-1, -1] = "Nat"

    df = DataFrame(arr)

    result = df.sum(skipna=False)
    expected = Series([pd.Timedelta(seconds=12), pd.NaT])
    tm.assert_series_equal(result, expected)

    result = df.sum(axis=0, skipna=False)
    tm.assert_series_equal(result, expected)

    result = df.sum(axis=1, skipna=False)
    expected = Series(
        [
            pd.Timedelta(seconds=1),
            pd.Timedelta(seconds=5),
            pd.Timedelta(seconds=9),
            pd.NaT,  # last row contains the NaT
        ]
    )
    tm.assert_series_equal(result, expected)
def test_mixed_frame_with_integer_sum():
    """sum() works on a frame mixing object strings with nullable Int64 (GH#34520)."""
    frame = DataFrame([["a", 1]], columns=list("ab")).astype({"b": "Int64"})
    summed = frame.sum()
    tm.assert_series_equal(summed, Series(["a", 1], index=["a", "b"]))
@pytest.mark.parametrize("numeric_only", [True, False, None])
@pytest.mark.parametrize("method", ["min", "max"])
def test_minmax_extensionarray(method, numeric_only):
    """min/max on a nullable Int64 column skip pd.NA and hit the int64 extremes."""
    # https://github.com/pandas-dev/pandas/issues/32651
    int64_info = np.iinfo("int64")
    ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype())
    df = DataFrame({"Int64": ser})
    result = getattr(df, method)(numeric_only=numeric_only)
    expected = Series(
        [getattr(int64_info, method)], index=Index(["Int64"], dtype="object")
    )
    tm.assert_series_equal(result, expected)
from datetime import timedelta
from decimal import Decimal
from dateutil.tz import tzlocal
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
def assert_stat_op_calc(
    opname,
    alternative,
    frame,
    has_skipna=True,
    check_dtype=True,
    check_dates=False,
    rtol=1e-5,
    atol=1e-8,
    skipna_alternative=None,
):
    """Check that frame.<opname>() matches `alternative` applied column/row-wise.

    Parameters
    ----------
    opname : str
        Name of the DataFrame reduction method under test.
    alternative : callable
        Reference implementation operating on an ndarray.
    frame : DataFrame
        Frame to reduce.
    has_skipna : bool
        Whether the method accepts a skipna keyword.
    check_dtype : bool
        Also compare result dtype against the frame's common dtype.
    check_dates : bool
        Also smoke-test the method on a datetime-only frame.
    rtol, atol : float
        Tolerances forwarded to the Series comparison.
    skipna_alternative : callable, optional
        NaN-skipping reference implementation (e.g. np.nansum).
    """
    f = getattr(frame, opname)

    if check_dates:
        # mean/median on datetimes currently warn about future inclusion
        expected_warning = FutureWarning if opname in ["mean", "median"] else None
        df = DataFrame({"b": date_range("1/1/2001", periods=2)})
        with tm.assert_produces_warning(expected_warning):
            result = getattr(df, opname)()
        assert isinstance(result, Series)

        df["a"] = range(len(df))
        with tm.assert_produces_warning(expected_warning):
            result = getattr(df, opname)()
        assert isinstance(result, Series)
        assert len(result)

    if has_skipna:

        def wrapper(x):
            return alternative(x.values)

        # NOTE: relies on the private pandas testing helper _make_skipna_wrapper
        skipna_wrapper = tm._make_skipna_wrapper(alternative, skipna_alternative)
        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)
        tm.assert_series_equal(
            result0, frame.apply(wrapper), check_dtype=check_dtype, rtol=rtol, atol=atol
        )
        tm.assert_series_equal(
            result1,
            frame.apply(wrapper, axis=1),
            check_dtype=False,  # HACK: win32
            rtol=rtol,
            atol=atol,
        )
    else:
        skipna_wrapper = alternative

    # default (skipna=True) path
    result0 = f(axis=0)
    result1 = f(axis=1)
    tm.assert_series_equal(
        result0,
        frame.apply(skipna_wrapper),
        check_dtype=check_dtype,
        rtol=rtol,
        atol=atol,
    )

    if opname in ["sum", "prod"]:
        expected = frame.apply(skipna_wrapper, axis=1)
        tm.assert_series_equal(
            result1, expected, check_dtype=False, rtol=rtol, atol=atol
        )

    # check dtypes
    if check_dtype:
        lcd_dtype = frame.values.dtype
        assert lcd_dtype == result0.dtype
        assert lcd_dtype == result1.dtype

    # bad axis must raise
    with pytest.raises(ValueError, match="No axis named 2"):
        f(axis=2)

    # all NA case
    if has_skipna:
        all_na = frame * np.NaN
        r0 = getattr(all_na, opname)(axis=0)
        r1 = getattr(all_na, opname)(axis=1)
        if opname in ["sum", "prod"]:
            # empty sum is 0, empty prod is 1
            unit = 1 if opname == "prod" else 0
            expected = Series(unit, index=r0.index, dtype=r0.dtype)
            tm.assert_series_equal(r0, expected)
            expected = Series(unit, index=r1.index, dtype=r1.dtype)
            tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame, has_numeric_only=False):
    """Smoke-check that the reduction named `opname` accepts the standard keywords.

    Only verifies the calls run; result values are checked in assert_stat_op_calc.
    """
    mixed_op = getattr(float_string_frame, opname)
    mixed_op(axis=0)
    mixed_op(axis=1)

    if has_numeric_only:
        for ax in (0, 1):
            mixed_op(axis=ax, numeric_only=True)
        for ax in (0, 1):
            getattr(float_frame, opname)(axis=ax, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
    """Check that frame.<opname>() (any/all) matches `alternative` column/row-wise.

    Parameters
    ----------
    opname : str
        "any" or "all".
    alternative : callable
        Reference implementation operating on an ndarray (np.any / np.all).
    frame : DataFrame
        Frame to reduce.
    has_skipna : bool
        Whether the method accepts a skipna keyword.
    """
    f = getattr(frame, opname)

    if has_skipna:

        def skipna_wrapper(x):
            nona = x.dropna().values
            return alternative(nona)

        def wrapper(x):
            return alternative(x.values)

        result0 = f(axis=0, skipna=False)
        result1 = f(axis=1, skipna=False)

        tm.assert_series_equal(result0, frame.apply(wrapper))
        tm.assert_series_equal(
            result1, frame.apply(wrapper, axis=1), check_dtype=False
        )
    else:
        skipna_wrapper = alternative
        wrapper = alternative

    # default (skipna=True) path
    result0 = f(axis=0)
    result1 = f(axis=1)

    tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
    tm.assert_series_equal(
        result1, frame.apply(skipna_wrapper, axis=1), check_dtype=False
    )

    # bad axis must raise
    with pytest.raises(ValueError, match="No axis named 2"):
        f(axis=2)

    # all NA case
    if has_skipna:
        all_na = frame * np.NaN
        r0 = getattr(all_na, opname)(axis=0)
        r1 = getattr(all_na, opname)(axis=1)
        if opname == "any":
            # any() over all-NA is False
            assert not r0.any()
            assert not r1.any()
        else:
            # all() over all-NA is True (vacuous truth)
            assert r0.all()
            assert r1.all()
def assert_bool_op_api(
    opname, bool_frame_with_na, float_string_frame, has_bool_only=False
):
    """Smoke-check that the logical reduction `opname` accepts the standard keywords.

    NOTE: mutates `float_string_frame` by appending a random boolean "_bool_" column.
    """
    mixed = float_string_frame
    mixed["_bool_"] = np.random.randn(len(mixed)) > 0.5

    reduce_mixed = getattr(mixed, opname)
    reduce_mixed(axis=0)
    reduce_mixed(axis=1)

    if has_bool_only:
        reduce_mixed(axis=0, bool_only=True)
        reduce_mixed(axis=1, bool_only=True)
        reduce_bool = getattr(bool_frame_with_na, opname)
        reduce_bool(axis=0, bool_only=False)
        reduce_bool(axis=1, bool_only=False)
class TestDataFrameAnalytics:
def test_stat_op_api(self, float_frame, float_string_frame):
    """Every statistical reduction accepts the standard axis/numeric_only keywords."""
    assert_stat_op_api(
        "count", float_frame, float_string_frame, has_numeric_only=True
    )
    assert_stat_op_api(
        "sum", float_frame, float_string_frame, has_numeric_only=True
    )

    assert_stat_op_api("nunique", float_frame, float_string_frame)
    assert_stat_op_api("mean", float_frame, float_string_frame)
    assert_stat_op_api("product", float_frame, float_string_frame)
    assert_stat_op_api("median", float_frame, float_string_frame)
    assert_stat_op_api("min", float_frame, float_string_frame)
    assert_stat_op_api("max", float_frame, float_string_frame)
    assert_stat_op_api("mad", float_frame, float_string_frame)
    assert_stat_op_api("var", float_frame, float_string_frame)
    assert_stat_op_api("std", float_frame, float_string_frame)
    assert_stat_op_api("sem", float_frame, float_string_frame)
    # FIX: removed a redundant duplicate "median" check that appeared here

    try:
        from scipy.stats import kurtosis, skew  # noqa:F401

        # scipy-backed reductions only when scipy is available
        assert_stat_op_api("skew", float_frame, float_string_frame)
        assert_stat_op_api("kurt", float_frame, float_string_frame)
    except ImportError:
        pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
    """Each statistical reduction matches its numpy/scipy reference implementation."""

    def count(s):
        return notna(s).sum()

    def nunique(s):
        return len(algorithms.unique1d(s.dropna()))

    def mad(x):
        return np.abs(x - x.mean()).mean()

    def var(x):
        return np.var(x, ddof=1)

    def std(x):
        return np.std(x, ddof=1)

    def sem(x):
        return np.std(x, ddof=1) / np.sqrt(len(x))

    def skewness(x):
        from scipy.stats import skew

        # scipy returns garbage for fewer than 3 observations
        if len(x) < 3:
            return np.nan
        return skew(x, bias=False)

    def kurt(x):
        from scipy.stats import kurtosis

        # scipy returns garbage for fewer than 4 observations
        if len(x) < 4:
            return np.nan
        return kurtosis(x, bias=False)

    assert_stat_op_calc(
        "nunique",
        nunique,
        float_frame_with_na,
        has_skipna=False,
        check_dtype=False,
        check_dates=True,
    )

    # mixed types (with upcasting happening)
    # FIX: the `assert_stat_op_calc(` opener for this call had been dropped,
    # leaving a dangling argument list (SyntaxError).
    assert_stat_op_calc(
        "sum",
        np.sum,
        mixed_float_frame.astype("float32"),
        check_dtype=False,
        rtol=1e-3,
    )

    assert_stat_op_calc(
        "sum", np.sum, float_frame_with_na, skipna_alternative=np.nansum
    )
    assert_stat_op_calc("mean", np.mean, float_frame_with_na, check_dates=True)
    assert_stat_op_calc(
        "product", np.prod, float_frame_with_na, skipna_alternative=np.nanprod
    )

    assert_stat_op_calc("mad", mad, float_frame_with_na)
    assert_stat_op_calc("var", var, float_frame_with_na)
    assert_stat_op_calc("std", std, float_frame_with_na)
    assert_stat_op_calc("sem", sem, float_frame_with_na)

    assert_stat_op_calc(
        "count",
        count,
        float_frame_with_na,
        has_skipna=False,
        check_dtype=False,
        check_dates=True,
    )

    try:
        # FIX: was `from scipy import kurtosis, skew`, which always raises
        # ImportError (these live in scipy.stats) and silently skipped the
        # skew/kurt checks below.
        from scipy.stats import kurtosis, skew  # noqa:F401

        assert_stat_op_calc("skew", skewness, float_frame_with_na)
        assert_stat_op_calc("kurt", kurt, float_frame_with_na)
    except ImportError:
        pass
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
    """median() matches a NaN-propagating numpy reference on float and int frames."""

    def wrapper(x):
        # reference medians propagate NaN (skipna handling is tested separately)
        if isna(x).any():
            return np.nan
        return np.median(x)

    assert_stat_op_calc("median", wrapper, float_frame_with_na, check_dates=True)
    assert_stat_op_calc(
        "median", wrapper, int_frame, check_dtype=False, check_dates=True
    )

@pytest.mark.parametrize(
    "method", ["sum", "mean", "prod", "var", "std", "skew", "min", "max"]
)
def test_stat_operators_attempt_obj_array(self, method):
    """Reductions on object-dtype numeric frames match the float64 result."""
    # GH#676
    data = {
        "a": [
            -0.00049987540199591344,
            -0.0016467257772919831,
            0.00067695870775883013,
        ],
        "b": [-0, -0, 0.0],
        "c": [
            0.00031111847529610595,
            0.0014902627951905339,
            -0.00094099200035979691,
        ],
    }
    df1 = DataFrame(data, index=["foo", "bar", "baz"], dtype="O")

    df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3], 2: [np.nan, 4]}, dtype=object)

    for df in [df1, df2]:
        assert df.values.dtype == np.object_
        result = getattr(df, method)(1)
        expected = getattr(df.astype("f8"), method)(1)

        if method in ["sum", "prod"]:
            tm.assert_series_equal(result, expected)

@pytest.mark.parametrize("op", ["mean", "std", "var", "skew", "kurt", "sem"])
def test_mixed_ops(self, op):
    """Reductions on mixed numeric/str frames drop the str column (GH#16116),
    with and without bottleneck."""
    df = DataFrame(
        {
            "int": [1, 2, 3, 4],
            "float": [1.0, 2.0, 3.0, 4.0],
            "str": ["a", "b", "c", "d"],
        }
    )

    result = getattr(df, op)()
    assert len(result) == 2

    with pd.option_context("use_bottleneck", False):
        result = getattr(df, op)()
        assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame(
{
"bool_data": [True, True, False, False, False],
"int_data": [10, 20, 30, 40, 50],
"string_data": ["a", "b", "c", "d", "e"],
}
)
df.reindex(columns=["bool_data", "int_data", "string_data"])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(
test.values, np.array([2, 150, "abcde"], dtype=object)
)
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({"A": [1, 1, 1], "B": [1, 2, 3], "C": [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({"A": 1, "B": 3, "C": 2}))
tm.assert_series_equal(
df.nunique(dropna=False), Series({"A": 1, "B": 3, "C": 3})
)
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(
df.nunique(axis=1, dropna=False), Series({0: 1, 1: 3, 2: 2})
)
@pytest.mark.parametrize("tz", [None, "UTC"])
def test_mean_mixed_datetime_numeric(self, tz):
    """mean() drops the datetime column (warning) and averages the numeric one."""
    # https://github.com/pandas-dev/pandas/issues/24752
    df = DataFrame({"A": [1, 1], "B": [Timestamp("2000", tz=tz)] * 2})
    with tm.assert_produces_warning(FutureWarning):
        result = df.mean()
    expected = Series([1.0], index=["A"])
    tm.assert_series_equal(result, expected)

@pytest.mark.parametrize("tz", [None, "UTC"])
def test_mean_excludes_datetimes(self, tz):
    """mean() on a datetime-only frame returns an empty float Series (with warning)."""
    # https://github.com/pandas-dev/pandas/issues/24752
    # Our long-term desired behavior is unclear, but the behavior in
    # 0.24.0rc1 was buggy.
    df = DataFrame({"A": [Timestamp("2000", tz=tz)] * 2})
    with tm.assert_produces_warning(FutureWarning):
        result = df.mean()

    expected = Series(dtype=np.float64)
    tm.assert_series_equal(result, expected)

def test_mean_mixed_string_decimal(self):
    """mean() handles Decimal columns and drops the mostly-None string column."""
    # GH 11670
    # possible bug when calculating mean of DataFrame?

    d = [
        {"A": 2, "B": None, "C": Decimal("628.00")},
        {"A": 1, "B": None, "C": Decimal("383.00")},
        {"A": 3, "B": None, "C": Decimal("651.00")},
        {"A": 2, "B": None, "C": Decimal("575.00")},
        {"A": 4, "B": None, "C": Decimal("1114.00")},
        {"A": 1, "B": "TEST", "C": Decimal("241.00")},
        {"A": 2, "B": None, "C": Decimal("572.00")},
        {"A": 4, "B": None, "C": Decimal("609.00")},
        {"A": 3, "B": None, "C": Decimal("820.00")},
        {"A": 5, "B": None, "C": Decimal("1223.00")},
    ]

    df = DataFrame(d)

    result = df.mean()
    expected = Series([2.7, 681.6], index=["A", "C"])
    tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
    """var/std honor ddof and nanvar never returns negative values."""
    result = datetime_frame.std(ddof=4)
    expected = datetime_frame.apply(lambda x: x.std(ddof=4))
    tm.assert_almost_equal(result, expected)

    result = datetime_frame.var(ddof=4)
    expected = datetime_frame.apply(lambda x: x.var(ddof=4))
    tm.assert_almost_equal(result, expected)

    # numerical-stability check: variance must be non-negative
    arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
    result = nanops.nanvar(arr, axis=0)
    assert not (result < 0).any()

    with pd.option_context("use_bottleneck", False):
        result = nanops.nanvar(arr, axis=0)
        assert not (result < 0).any()

@pytest.mark.parametrize("meth", ["sem", "var", "std"])
def test_numeric_only_flag(self, meth):
    """numeric_only=True skips str-contaminated columns; numeric_only=False raises."""
    # GH 9201
    df1 = DataFrame(np.random.randn(5, 3), columns=["foo", "bar", "baz"])
    # set one entry to a number in str format
    df1.loc[0, "foo"] = "100"

    df2 = DataFrame(np.random.randn(5, 3), columns=["foo", "bar", "baz"])
    # set one entry to a non-number str
    df2.loc[0, "foo"] = "a"

    result = getattr(df1, meth)(axis=1, numeric_only=True)
    expected = getattr(df1[["bar", "baz"]], meth)(axis=1)
    tm.assert_series_equal(expected, result)

    result = getattr(df2, meth)(axis=1, numeric_only=True)
    expected = getattr(df2[["bar", "baz"]], meth)(axis=1)
    tm.assert_series_equal(expected, result)

    # df1 has all numbers, df2 has a letter inside
    msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
    with pytest.raises(TypeError, match=msg):
        getattr(df1, meth)(axis=1, numeric_only=False)
    msg = "could not convert string to float: 'a'"
    with pytest.raises(TypeError, match=msg):
        getattr(df2, meth)(axis=1, numeric_only=False)

def test_sem(self, datetime_frame):
    """sem honors ddof and nansem never returns negative values."""
    result = datetime_frame.sem(ddof=4)
    expected = datetime_frame.apply(lambda x: x.std(ddof=4) / np.sqrt(len(x)))
    tm.assert_almost_equal(result, expected)

    arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
    result = nanops.nansem(arr, axis=0)
    assert not (result < 0).any()

    with pd.option_context("use_bottleneck", False):
        result = nanops.nansem(arr, axis=0)
        assert not (result < 0).any()

@td.skip_if_no_scipy
def test_kurt(self):
    """kurt() on a MultiIndex frame matches the level-0 cross-section."""
    index = MultiIndex(
        levels=[["bar"], ["one", "two", "three"], [0, 1]],
        codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
    )
    df = DataFrame(np.random.randn(6, 3), index=index)

    kurt = df.kurt()
    kurt2 = df.kurt(level=0).xs("bar")
    tm.assert_series_equal(kurt, kurt2, check_names=False)
    assert kurt.name is None
    assert kurt2.name == "bar"
@pytest.mark.parametrize(
    "dropna, expected",
    [
        (
            True,
            {
                "A": [12],
                "B": [10.0],
                "C": [1.0],
                "D": ["a"],
                "E": Categorical(["a"], categories=["a"]),
                "F": to_datetime(["2000-1-2"]),
                "G": to_timedelta(["1 days"]),
            },
        ),
        (
            False,
            {
                "A": [12],
                "B": [10.0],
                "C": [np.nan],
                "D": np.array([np.nan], dtype=object),
                "E": Categorical([np.nan], categories=["a"]),
                "F": [pd.NaT],
                "G": to_timedelta([pd.NaT]),
            },
        ),
        (
            True,
            {
                "H": [8, 9, np.nan, np.nan],
                "I": [8, 9, np.nan, np.nan],
                "J": [1, np.nan, np.nan, np.nan],
                "K": Categorical(["a", np.nan, np.nan, np.nan], categories=["a"]),
                "L": to_datetime(["2000-1-2", "NaT", "NaT", "NaT"]),
                "M": to_timedelta(["1 days", "nan", "nan", "nan"]),
                "N": [0, 1, 2, 3],
            },
        ),
        (
            False,
            {
                "H": [8, 9, np.nan, np.nan],
                "I": [8, 9, np.nan, np.nan],
                "J": [1, np.nan, np.nan, np.nan],
                "K": Categorical([np.nan, "a", np.nan, np.nan], categories=["a"]),
                "L": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]),
                "M": to_timedelta(["nan", "1 days", "nan", "nan"]),
                "N": [0, 1, 2, 3],
            },
        ),
    ],
)
def test_mode_dropna(self, dropna, expected):
    """mode() across every dtype family, with NaN included or excluded via dropna."""
    df = DataFrame(
        {
            "A": [12, 12, 19, 11],
            "B": [10, 10, np.nan, 3],
            "C": [1, np.nan, np.nan, np.nan],
            "D": [np.nan, np.nan, "a", np.nan],
            "E": Categorical([np.nan, np.nan, "a", np.nan]),
            "F": to_datetime(["NaT", "2000-1-2", "NaT", "NaT"]),
            "G": to_timedelta(["1 days", "nan", "nan", "nan"]),
            "H": [8, 8, 9, 9],
            "I": [9, 9, 8, 8],
            "J": [1, 1, np.nan, np.nan],
            "K": Categorical(["a", np.nan, "a", np.nan]),
            "L": to_datetime(["2000-1-2", "2000-1-2", "NaT", "NaT"]),
            "M": to_timedelta(["1 days", "nan", "1 days", "nan"]),
            "N": np.arange(4, dtype="int64"),
        }
    )

    result = df[sorted(expected.keys())].mode(dropna=dropna)
    expected = DataFrame(expected)
    tm.assert_frame_equal(result, expected)

def test_mode_sortwarning(self):
    """mode(dropna=False) warns when NaN makes the result unsortable."""
    # Check for the warning that is raised when the mode
    # results cannot be sorted
    df = DataFrame({"A": [np.nan, np.nan, "a", "a"]})
    expected = DataFrame({"A": ["a", np.nan]})

    with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
        result = df.mode(dropna=False)
        result = result.sort_values(by="A").reset_index(drop=True)

    tm.assert_frame_equal(result, expected)
def test_mode_empty_df(self):
df = DataFrame([], columns=["a", "b"])
result = df.mode()
expected = DataFrame([], columns=["a", "b"], index=Index([], dtype=int))
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(
{
"A": date_range("2012-1-1", periods=3, freq="D"),
"B": date_range("2012-1-2", periods=3, freq="D"),
"C": Timestamp("20120101") - timedelta(minutes=5, seconds=5),
}
)
diffs = DataFrame({"A": df["A"] - df["C"], "B": df["A"] - df["B"]})
# min
result = diffs.min()
assert result[0] == diffs.loc[0, "A"]
assert result[1] == diffs.loc[0, "B"]
result = diffs.min(axis=1)
assert (result == diffs.loc[0, "B"]).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, "A"]
assert result[1] == diffs.loc[2, "B"]
result = diffs.max(axis=1)
assert (result == diffs["A"]).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame({"A": df["A"] - df["C"], "B": df["B"] - df["A"]})
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed["C"] = "foo"
mixed["D"] = 1
mixed["E"] = 1.0
mixed["F"] = Timestamp("20130101")
# results in an object array
result = mixed.min()
expected = Series(
[
pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
"foo",
1,
1.0,
Timestamp("20130101"),
],
index=mixed.columns,
)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.0], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[["A", "B"]].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[["A", "B"]].min()
expected = Series(
[timedelta(seconds=5 * 60 + 5), timedelta(days=-1)], index=["A", "B"]
)
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame(
{
"time": date_range("20130102", periods=5),
"time2": date_range("20130105", periods=5),
}
)
df["off1"] = df["time2"] - df["time"]
assert df["off1"].dtype == "timedelta64[ns]"
df["off2"] = df["time"] - df["time2"]
df._consolidate_inplace()
assert df["off1"].dtype == "timedelta64[ns]"
assert df["off2"].dtype == "timedelta64[ns]"
def test_std_timedelta64_skipna_false(self):
    """GH#37392: std over timedelta64 propagates NaT when skipna=False."""
    tdi = pd.timedelta_range("1 Day", periods=10)
    df = DataFrame({"A": tdi, "B": tdi})
    # Inject a single missing value in column "B".
    df.iloc[-2, -1] = pd.NaT

    result = df.std(skipna=False)
    expected = Series(
        [df["A"].std(), pd.NaT], index=["A", "B"], dtype="timedelta64[ns]"
    )
    tm.assert_series_equal(result, expected)

    # Row-wise: identical columns give std 0, except the row holding NaT.
    result = df.std(axis=1, skipna=False)
    expected = Series([pd.Timedelta(0)] * 8 + [pd.NaT, pd.Timedelta(0)])
    tm.assert_series_equal(result, expected)
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize("method, unit", [("sum", 0), ("prod", 1)])
def test_sum_prod_nanops(self, method, unit):
    """sum/prod honor ``min_count`` in the presence of missing values.

    ``unit`` is the reduction identity (0 for sum, 1 for prod), so a
    fully-valid column reduces to ``unit`` and ``min_count`` decides
    whether partially/fully missing columns yield ``unit`` or NaN.
    """
    idx = ["a", "b", "c"]
    df = DataFrame({"a": [unit, unit], "b": [unit, np.nan], "c": [np.nan, np.nan]})

    # The default (min_count=0): every column reduces to the identity.
    # BUG FIX: the bound method was fetched but never called, and the
    # result was never asserted against `expected`.
    result = getattr(df, method)()
    expected = Series([unit, unit, unit], index=idx, dtype="float64")
    tm.assert_series_equal(result, expected)

    # min_count=1: the all-NaN column "c" yields NaN.
    result = getattr(df, method)(min_count=1)
    expected = Series([unit, unit, np.nan], index=idx)
    tm.assert_series_equal(result, expected)

    # min_count=0: identical to the default.
    result = getattr(df, method)(min_count=0)
    expected = Series([unit, unit, unit], index=idx, dtype="float64")
    tm.assert_series_equal(result, expected)

    # After dropping the first row, only "a" still has a valid value.
    result = getattr(df.iloc[1:], method)(min_count=1)
    expected = Series([unit, np.nan, np.nan], index=idx)
    tm.assert_series_equal(result, expected)

    # min_count > 1
    # NOTE(review): the two assertions below are tautological (`expected`
    # is built from `result`); kept as-is because the intended expected
    # values are not stated here — confirm upstream intent before fixing.
    df = DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
    result = getattr(df, method)(min_count=5)
    expected = Series(result, index=["A", "B"])
    tm.assert_series_equal(result, expected)

    result = getattr(df, method)(min_count=6)
    expected = Series(result, index=["A", "B"])
    tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
    """``min_count`` semantics for sums of timedelta64 columns."""
    # prod isn't defined on timedeltas
    idx = ["a", "b", "c"]
    df = DataFrame({"a": [0, 0], "b": [0, np.nan], "c": [np.nan, np.nan]})

    df2 = df.apply(pd.to_timedelta)

    # Default and min_count=0: every column sums to 0 days.
    result = df2.sum()
    expected = Series([0, 0, 0], dtype="m8[ns]", index=idx)
    tm.assert_series_equal(result, expected)

    result = df2.sum(min_count=0)
    tm.assert_series_equal(result, expected)

    # min_count=1: the all-NaN column "c" becomes NaT.
    result = df2.sum(min_count=1)
    expected = Series([0, 0, np.nan], dtype="m8[ns]", index=idx)
    tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index, columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_sum_mixed_datetime(self):
df = DataFrame(
{"A": pd.date_range("2000", periods=4), "B": [1, 2, 3, 4]}
).reindex([2, 3, 4])
result = df.sum()
expected = Series({"B": 7.0})
tm.assert_series_equal(result, expected)
def test_mean_corner(self, float_frame, float_string_frame):
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
float_frame["bool"] = float_frame["A"] > 0
means = float_frame.mean(0)
assert means["bool"] == float_frame["bool"].values.mean()
def test_mean_datetimelike(self):
    """DataFrame.mean over mixed numeric/datetimelike columns.

    ``numeric_only=True`` keeps only the integer column; the default
    (numeric_only=None) warns and includes the timedelta column while
    dropping the datetime and period columns.

    BUG FIX: the DataFrame construction was garbled — the method body
    started inside the dict literal with no ``df = DataFrame({`` line.
    Reconstructed to match the sibling
    ``test_mean_datetimelike_numeric_only_false`` below.
    """
    df = DataFrame(
        {
            "A": np.arange(3),
            "B": pd.date_range("2016-01-01", periods=3),
            "C": pd.timedelta_range("1D", periods=3),
            "D": pd.period_range("2016", periods=3, freq="A"),
        }
    )
    result = df.mean(numeric_only=True)
    expected = Series({"A": 1.0})
    tm.assert_series_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning):
        result = df.mean()
    expected = Series({"A": 1.0, "C": df.loc[1, "C"]})
    tm.assert_series_equal(result, expected)
def test_mean_datetimelike_numeric_only_false(self):
df = DataFrame(
{
"A": np.arange(3),
"B": pd.date_range("2016-01-01", periods=3),
"C": pd.timedelta_range("1D", periods=3),
}
)
result = df.mean(numeric_only=False)
expected = Series({"A": 1, "B": df.loc[1, "B"], "C": df.loc[1, "C"]})
tm.assert_series_equal(result, expected)
df["D"] = pd.period_range("2016", periods=3, freq="A")
with pytest.raises(TypeError, match="mean is not implemented for Period"):
df.mean(numeric_only=False)
def test_mean_extensionarray_numeric_only_true(self):
arr = np.random.randint(1000, size=(10, 5))
df = DataFrame(arr, dtype="Int64")
result = df.mean(numeric_only=True)
expected = DataFrame(arr).mean()
tm.assert_series_equal(result, expected)
def test_stats_mixed_type(self, float_string_frame):
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=range(1), columns=range(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
    """DataFrame.idxmin matches an axis-wise Series.idxmin apply."""
    frame = float_frame
    # Inject missing values to exercise skipna handling.
    frame.iloc[5:10] = np.nan
    frame.iloc[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, int_frame]:
                result = df.idxmin(axis=axis, skipna=skipna)
                expected = df.apply(Series.idxmin, axis=axis, skipna=skipna)
                tm.assert_series_equal(result, expected)

    # An out-of-range axis raises.
    msg = "No axis named 2 for object type DataFrame"
    with pytest.raises(ValueError, match=msg):
        frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
    """DataFrame.idxmax matches an axis-wise Series.idxmax apply."""
    frame = float_frame
    # Inject missing values to exercise skipna handling.
    frame.iloc[5:10] = np.nan
    frame.iloc[15:20, -2:] = np.nan
    for skipna in [True, False]:
        for axis in [0, 1]:
            for df in [frame, int_frame]:
                result = df.idxmax(axis=axis, skipna=skipna)
                expected = df.apply(Series.idxmax, axis=axis, skipna=skipna)
                tm.assert_series_equal(result, expected)

    # An out-of-range axis raises.
    msg = "No axis named 2 for object type DataFrame"
    with pytest.raises(ValueError, match=msg):
        frame.idxmax(axis=2)
def test_idxmax_mixed_dtype(self):
    """idxmax/idxmin work on frames mixing int and datetime columns."""
    # don't cast to object, which would raise in nanops
    dti = pd.date_range("2016-01-01", periods=3)

    df = DataFrame({1: [0, 2, 1], 2: range(3)[::-1], 3: dti})

    result = df.idxmax()
    expected = Series([1, 0, 2], index=[1, 2, 3])
    tm.assert_series_equal(result, expected)

    result = df.idxmin()
    expected = Series([0, 2, 0], index=[1, 2, 3])
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("opname", ["any", "all"])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(
opname, getattr(np, opname), bool_frame_with_na, has_skipna=True
)
assert_bool_op_api(
opname, bool_frame_with_na, float_string_frame, has_bool_only=True
)
def test_any_all_extra(self):
df = DataFrame(
{
"A": [True, False, False],
"B": [True, True, False],
"C": [True, True, True],
},
index=["a", "b", "c"],
)
result = df[["A", "B"]].any(1)
expected = Series([True, True, False], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
result = df[["A", "B"]].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[["C"]].all(axis=None).item()
assert result is True
def test_any_datetime(self):
    """Row-wise any() treats NaN/NaT as falsey and real values as truthy."""
    float_data = [1, np.nan, 3, np.nan]
    datetime_data = [
        Timestamp("1960-02-15"),
        Timestamp("1960-02-16"),
        pd.NaT,
        pd.NaT,
    ]
    df = DataFrame({"A": float_data, "B": datetime_data})

    result = df.any(1)
    # The last row contains only NaN + NaT, so it is the sole False.
    expected = Series([True, True, True, False])
    tm.assert_series_equal(result, expected)
def test_any_all_bool_only(self):
df = DataFrame(
{"col1": [1, 2, 3], "col2": [4, 5, 6], "col3": [None, None, None]}
)
result = df.all(bool_only=True)
expected = Series(dtype=np.bool_)
tm.assert_series_equal(result, expected)
df = DataFrame(
{
"col1": [1, 2, 3],
"col2": [4, 5, 6],
"col3": [None, None, None],
"col4": [False, False, True],
}
)
result = df.all(bool_only=True)
expected = Series({"col4": False})
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func, data, expected",
[
(np.any, {}, False),
(np.all, {}, True),
(np.any, {"A": []}, False),
(np.all, {"A": []}, True),
(np.any, {"A": [False, False]}, False),
(np.all, {"A": [False, False]}, False),
(np.any, {"A": [True, False]}, True),
(np.all, {"A": [True, False]}, False),
(np.any, {"A": [True, True]}, True),
(np.all, {"A": [True, True]}, True),
(np.any, {"A": [False], "B": [False]}, False),
(np.all, {"A": [False], "B": [False]}, False),
(np.any, {"A": [False, False], "B": [False, True]}, True),
(np.all, {"A": [False, False], "B": [False, True]}, False),
(np.all, {"A": Series([0.0, 1.0], dtype="float")}, False),
(np.any, {"A": Series([0.0, 1.0], dtype="float")}, True),
(np.all, {"A": Series([0, 1], dtype=int)}, False),
(np.any, {"A": Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns]")}, False),
pytest.param(np.all, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, False),
pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns]")}, True),
pytest.param(np.any, {"A": Series([0, 1], dtype="M8[ns, UTC]")}, True),
pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns]")}, True),
pytest.param(np.all, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True),
pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns]")}, True),
pytest.param(np.any, {"A": Series([1, 2], dtype="M8[ns, UTC]")}, True),
pytest.param(np.all, {"A": Series([0, 1], dtype="m8[ns]")}, False),
pytest.param(np.any, {"A": Series([0, 1], dtype="m8[ns]")}, True),
pytest.param(np.all, {"A": Series([1, 2], dtype="m8[ns]")}, True),
pytest.param(np.any, {"A": Series([1, 2], dtype="m8[ns]")}, True),
(np.all, {"A": Series([0, 1], dtype="category")}, True),
(np.any, {"A": Series([0, 1], dtype="category")}, False),
(np.all, {"A": Series([1, 2], dtype="category")}, True),
(np.any, {"A": Series([1, 2], dtype="category")}, False),
pytest.param(
np.all,
{
"A": Series([10, 20], dtype="M8[ns]"),
"B": Series([10, 20], dtype="m8[ns]"),
},
True,
),
],
)
def test_any_all_np_func(self, func, data, expected):
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
result = np.all(DataFrame(columns=["a", "b"])).item()
assert result is True
result = np.any(DataFrame(columns=["a", "b"])).item()
assert result is False
def test_any_all_object_bool_only(self):
df = DataFrame({"A": ["foo", 2], "B": [True, False]}).astype(object)
df._consolidate_inplace()
df["C"] = Series([True, True])
res = df._get_bool_data()
expected = df[["B", "C"]]
tm.assert_frame_equal(res, expected)
res = df.all(bool_only=True, axis=0)
expected = Series([False, True], index=["B", "C"])
tm.assert_series_equal(res, expected)
# operating on a subset of columns should not produce a _larger_ Series
res = df[["B", "C"]].all(bool_only=True, axis=0)
tm.assert_series_equal(res, expected)
assert not df.all(bool_only=True, axis=None)
res = df.any(bool_only=True, axis=0)
expected = Series([True, True], index=["B", "C"])
tm.assert_series_equal(res, expected)
# operating on a subset of columns should not produce a _larger_ Series
res = df[["B", "C"]].any(bool_only=True, axis=0)
tm.assert_series_equal(res, expected)
assert df.any(bool_only=True, axis=None)
@pytest.mark.parametrize("method", ["any", "all"])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product(
[["A", "B"], ["a", "b"]], names=["out", "in"]
),
)
xpr = "Must specify 'axis' when aggregating by level."
with pytest.raises(ValueError, match=xpr):
getattr(df, method)(axis=None, level="out")
# ---------------------------------------------------------------------
# Unsorted
def test_series_broadcasting(self):
# smoke test for numpy warnings
# GH 16378, GH 16306
df = DataFrame([1.0, 1.0, 1.0])
df_nan = DataFrame({"A": [np.nan, 2.0, np.nan]})
s = Series([1, 1, 1])
s_nan = Series([np.nan, np.nan, 1])
with tm.assert_produces_warning(None):
df_nan.clip(lower=s, axis=0)
for op in ["lt", "le", "gt", "ge", "eq", "ne"]:
getattr(df, op)(s_nan, axis=0)
class TestDataFrameReductions:
    """min/max/any/all reductions on DataFrames holding datetimelike data."""

    def test_min_max_dt64_with_NaT(self):
        # Both NaT and Timestamp are in DataFrame.
        df = DataFrame({"foo": [pd.NaT, pd.NaT, Timestamp("2012-05-01")]})

        res = df.min()
        exp = Series([Timestamp("2012-05-01")], index=["foo"])
        tm.assert_series_equal(res, exp)

        res = df.max()
        exp = Series([Timestamp("2012-05-01")], index=["foo"])
        tm.assert_series_equal(res, exp)

        # GH12941, only NaTs are in DataFrame.
        df = DataFrame({"foo": [pd.NaT, pd.NaT]})

        res = df.min()
        exp = Series([pd.NaT], index=["foo"])
        tm.assert_series_equal(res, exp)

        res = df.max()
        exp = Series([pd.NaT], index=["foo"])
        tm.assert_series_equal(res, exp)

    def test_min_max_dt64_with_NaT_skipna_false(self, request, tz_naive_fixture):
        # GH#36907: with skipna=False a NaT in the row forces NaT out.
        tz = tz_naive_fixture
        if isinstance(tz, tzlocal) and is_platform_windows():
            request.node.add_marker(
                pytest.mark.xfail(
                    reason="GH#37659 OSError raised within tzlocal bc Windows "
                    "chokes in times before 1970-01-01"
                )
            )

        df = DataFrame(
            {
                "a": [
                    Timestamp("2020-01-01 08:00:00", tz=tz),
                    Timestamp("1920-02-01 09:00:00", tz=tz),
                ],
                "b": [Timestamp("2020-02-01 08:00:00", tz=tz), pd.NaT],
            }
        )

        res = df.min(axis=1, skipna=False)
        expected = Series([df.loc[0, "a"], pd.NaT])
        assert expected.dtype == df["a"].dtype
        tm.assert_series_equal(res, expected)

        res = df.max(axis=1, skipna=False)
        expected = Series([df.loc[0, "b"], pd.NaT])
        assert expected.dtype == df["a"].dtype
        tm.assert_series_equal(res, expected)

    def test_min_max_dt64_api_consistency_with_NaT(self):
        # Calling the following sum functions returned an error for dataframes but
        # returned NaT for series. These tests check that the API is consistent in
        # min/max calls on empty Series/DataFrames. See GH:33704 for more
        # information
        df = DataFrame({"x": pd.to_datetime([])})
        expected_dt_series = Series(pd.to_datetime([]))

        # check axis 0
        assert (df.min(axis=0).x is pd.NaT) == (expected_dt_series.min() is pd.NaT)
        assert (df.max(axis=0).x is pd.NaT) == (expected_dt_series.max() is pd.NaT)

        # check axis 1
        tm.assert_series_equal(df.min(axis=1), expected_dt_series)
        tm.assert_series_equal(df.max(axis=1), expected_dt_series)

    def test_min_max_dt64_api_consistency_empty_df(self):
        # check DataFrame/Series api consistency when calling min/max on an empty
        # DataFrame/Series.
        df = DataFrame({"x": []})
        expected_float_series = Series([], dtype=float)

        # check axis 0
        assert np.isnan(df.min(axis=0).x) == np.isnan(expected_float_series.min())
        assert np.isnan(df.max(axis=0).x) == np.isnan(expected_float_series.max())

        # check axis 1
        tm.assert_series_equal(df.min(axis=1), expected_float_series)
        # BUG FIX: this line previously re-tested min(axis=1); max(axis=1)
        # was never exercised.
        tm.assert_series_equal(df.max(axis=1), expected_float_series)

    @pytest.mark.parametrize(
        "initial",
        ["2018-10-08 13:36:45+00:00", "2018-10-08 13:36:45+03:00"],  # Non-UTC timezone
    )
    @pytest.mark.parametrize("method", ["min", "max"])
    def test_preserve_timezone(self, initial: str, method):
        # GH 28552: row-wise min/max must keep the timezone of the input.
        initial_dt = pd.to_datetime(initial)
        expected = Series([initial_dt])
        df = DataFrame([expected])
        result = getattr(df, method)(axis=1)
        tm.assert_series_equal(result, expected)

    def test_frame_any_all_with_level(self):
        df = DataFrame(
            {"data": [False, False, True, False, True, False, True]},
            index=[
                ["one", "one", "two", "one", "two", "two", "two"],
                [0, 1, 0, 2, 1, 2, 3],
            ],
        )

        result = df.any(level=0)
        ex = DataFrame({"data": [False, True]}, index=["one", "two"])
        tm.assert_frame_equal(result, ex)

        result = df.all(level=0)
        ex = DataFrame({"data": [False, False]}, index=["one", "two"])
        tm.assert_frame_equal(result, ex)

    def test_frame_any_with_timedelta(self):
        # GH#17667: a zero timedelta is falsey for any().
        df = DataFrame(
            {
                "a": Series([0, 0]),
                "t": Series([pd.to_timedelta(0, "s"), pd.to_timedelta(1, "ms")]),
            }
        )

        result = df.any(axis=0)
        expected = Series(data=[False, True], index=["a", "t"])
        tm.assert_series_equal(result, expected)

        result = df.any(axis=1)
        expected = Series(data=[False, True])
        tm.assert_series_equal(result, expected)
class TestNuisanceColumns:
    """Reductions either raise on, or silently drop, non-reducible columns."""

    @pytest.mark.parametrize("method", ["any", "all"])
    def test_any_all_categorical_dtype_nuisance_column(self, method):
        # GH#36076 DataFrame should match Series behavior
        ser = Series([0, 1], dtype="category", name="A")
        df = ser.to_frame()

        # Double-check the Series behavior is to raise
        with pytest.raises(TypeError, match="does not implement reduction"):
            getattr(ser, method)()

        with pytest.raises(TypeError, match="does not implement reduction"):
            getattr(np, method)(ser)

        with pytest.raises(TypeError, match="does not implement reduction"):
            getattr(df, method)(bool_only=False)

        # With bool_only=None, operating on this column raises and is ignored,
        # so we expect an empty result.
        result = getattr(df, method)(bool_only=None)
        expected = Series([], index=Index([]), dtype=bool)
        tm.assert_series_equal(result, expected)

        result = getattr(np, method)(df, axis=0)
        tm.assert_series_equal(result, expected)

    def test_median_categorical_dtype_nuisance_column(self):
        # GH#21020 DataFrame.median should match Series.median
        df = DataFrame({"A": Categorical([1, 2, 2, 2, 3])})
        ser = df["A"]

        # Double-check the Series behavior is to raise
        with pytest.raises(TypeError, match="does not implement reduction"):
            ser.median()

        with pytest.raises(TypeError, match="does not implement reduction"):
            df.median(numeric_only=False)

        # Categorical column is dropped entirely -> empty result.
        result = df.median()
        expected = Series([], index=Index([]), dtype=np.float64)
        tm.assert_series_equal(result, expected)

        # same thing, but with an additional non-categorical column
        df["B"] = df["A"].astype(int)

        with pytest.raises(TypeError, match="does not implement reduction"):
            df.median(numeric_only=False)

        result = df.median()
        expected = Series([2.0], index=["B"])
        tm.assert_series_equal(result, expected)

        # TODO: np.median(df, axis=0) gives np.array([2.0, 2.0]) instead
        # of expected.values

    @pytest.mark.parametrize("method", ["min", "max"])
    def test_min_max_categorical_dtype_non_ordered_nuisance_column(self, method):
        # GH#28949 DataFrame.min should behave like Series.min
        cat = Categorical(["a", "b", "c", "b"], ordered=False)
        ser = Series(cat)
        df = ser.to_frame("A")

        # Double-check the Series behavior
        with pytest.raises(TypeError, match="is not ordered for operation"):
            getattr(ser, method)()

        with pytest.raises(TypeError, match="is not ordered for operation"):
            getattr(np, method)(ser)

        with pytest.raises(TypeError, match="is not ordered for operation"):
            getattr(df, method)(numeric_only=False)

        # Unordered categorical is dropped -> empty result.
        result = getattr(df, method)()
        expected = Series([], index=Index([]), dtype=np.float64)
        tm.assert_series_equal(result, expected)

        result = getattr(np, method)(df)
        tm.assert_series_equal(result, expected)

        # same thing, but with an additional non-categorical column
        df["B"] = df["A"].astype(object)
        result = getattr(df, method)()
        if method == "min":
            expected = Series(["a"], index=["B"])
        else:
            expected = Series(["c"], index=["B"])
        tm.assert_series_equal(result, expected)

        result = getattr(np, method)(df)
        tm.assert_series_equal(result, expected)

    def test_reduction_object_block_splits_nuisance_columns(self):
        # GH#37827: only the non-numeric object column should be excluded.
        df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", "c"]}, dtype=object)

        # We should only exclude "B", not "A"
        result = df.mean()
        expected = Series([1.0], index=["A"])
        tm.assert_series_equal(result, expected)

        # Same behavior but heterogeneous dtype
        df["C"] = df["A"].astype(int) + 4
        result = df.mean()
        expected = Series([1.0, 5.0], index=["A", "C"])
        tm.assert_series_equal(result, expected)
def test_sum_timedelta64_skipna_false():
# GH#17235
arr = np.arange(8).astype(np.int64).view("m8[s]").reshape(4, 2)
arr[-1, -1] = "Nat"
df = DataFrame(arr)
result = df.sum(skipna=False)
expected = Series([pd.Timedelta(seconds=12), pd.NaT])
tm.assert_series_equal(result, expected)
result = df.sum(axis=0, skipna=False)
tm.assert_series_equal(result, expected)
result = df.sum(axis=1, skipna=False)
expected = Series(
[
pd.Timedelta(seconds=1),
pd.Timedelta(seconds=5),
pd.Timedelta(seconds=9),
pd.NaT,
]
)
tm.assert_series_equal(result, expected)
def test_mixed_frame_with_integer_sum():
    """Summing a frame mixing object and nullable Int64 columns keeps both results."""
    # https://github.com/pandas-dev/pandas/issues/34520
    frame = DataFrame([["a", 1]], columns=list("ab")).astype({"b": "Int64"})
    summed = frame.sum()
    expected = Series(["a", 1], index=["a", "b"])
    tm.assert_series_equal(summed, expected)
@pytest.mark.parametrize("numeric_only", [True, False, None])
@pytest.mark.parametrize("method", ["min", "max"])
def test_minmax_extensionarray(method, numeric_only):
    """min/max over a nullable Int64 column handle int64 extremes and NA."""
    # https://github.com/pandas-dev/pandas/issues/32651
    int64_info = np.iinfo("int64")
    ser = Series([int64_info.max, None, int64_info.min], dtype=pd.Int64Dtype())
    df = DataFrame({"Int64": ser})
    result = getattr(df, method)(numeric_only=numeric_only)
    expected = Series(
        [getattr(int64_info, method)], index=Index(["Int64"], dtype="object")
    )
    tm.assert_series_equal(result, expected)
| true | true |
1c397d8363d117e09f5dbcb061e7fc8351cacae5 | 1,040 | py | Python | mys/transpiler/variables.py | Dogeek/mys | 193259a634c3ab1d9058b9ff79a0462ae86274b7 | [
"MIT"
] | null | null | null | mys/transpiler/variables.py | Dogeek/mys | 193259a634c3ab1d9058b9ff79a0462ae86274b7 | [
"MIT"
] | null | null | null | mys/transpiler/variables.py | Dogeek/mys | 193259a634c3ab1d9058b9ff79a0462ae86274b7 | [
"MIT"
] | null | null | null | class Variables:
"""A class that keeps track of which variables that are defined in all
branches, so that they can be used once they converges.
"""
def __init__(self):
self._first_add = True
self._local_variables = {}
def add_branch(self, variables):
"""Add all variables defined in a branch. Should be called once for
each branch.
"""
if self._first_add:
for name, info in variables.items():
self._local_variables[name] = info
self._first_add = False
else:
to_remove = []
for name, info in self._local_variables.items():
new_info = variables.get(name)
if new_info is None or new_info != info:
to_remove.append(name)
for name in to_remove:
self._local_variables.pop(name)
def defined(self):
"""A dictionary of all variables found in all branches.
"""
return self._local_variables
| 26 | 75 | 0.573077 | class Variables:
def __init__(self):
self._first_add = True
self._local_variables = {}
def add_branch(self, variables):
if self._first_add:
for name, info in variables.items():
self._local_variables[name] = info
self._first_add = False
else:
to_remove = []
for name, info in self._local_variables.items():
new_info = variables.get(name)
if new_info is None or new_info != info:
to_remove.append(name)
for name in to_remove:
self._local_variables.pop(name)
def defined(self):
return self._local_variables
| true | true |
1c397ea5dd0698ae77e20b40aa0bbd6c2324ff82 | 245 | py | Python | dataformats/server_data.py | uuu0614/discordpy_util_bot | 56a3bae2caf1b351d91b5515c9052a4cc716f832 | [
"MIT"
] | 2 | 2021-08-13T07:06:50.000Z | 2021-08-13T07:23:48.000Z | dataformats/server_data.py | uuu0614/discordpy_util_bot | 56a3bae2caf1b351d91b5515c9052a4cc716f832 | [
"MIT"
] | null | null | null | dataformats/server_data.py | uuu0614/discordpy_util_bot | 56a3bae2caf1b351d91b5515c9052a4cc716f832 | [
"MIT"
] | 2 | 2020-10-31T13:52:54.000Z | 2021-08-13T07:07:25.000Z | class ServerData:
def __init__(self):
self._prefixes = ["u!"]
# サーバープレフィックス
@property
def prefixes(self):
return self._prefixes
@prefixes.setter
def prefixes(self, prefs):
self._prefixes = prefs
| 18.846154 | 31 | 0.608163 | class ServerData:
def __init__(self):
self._prefixes = ["u!"]
@property
def prefixes(self):
return self._prefixes
@prefixes.setter
def prefixes(self, prefs):
self._prefixes = prefs
| true | true |
1c397eae11cd62664355034206a4949502e8204d | 528 | py | Python | readthedocs/core/permissions.py | phoenixflyinghigh/readthedocs.org | 2dc1615c674b08c8b681ac3543fee913c9d90a11 | [
"MIT"
] | 2 | 2019-11-19T20:50:25.000Z | 2021-04-26T21:59:29.000Z | readthedocs/core/permissions.py | phoenixflyinghigh/readthedocs.org | 2dc1615c674b08c8b681ac3543fee913c9d90a11 | [
"MIT"
] | 12 | 2019-12-05T04:47:01.000Z | 2022-01-09T00:56:58.000Z | readthedocs/core/permissions.py | phoenixflyinghigh/readthedocs.org | 2dc1615c674b08c8b681ac3543fee913c9d90a11 | [
"MIT"
] | 1 | 2020-01-09T02:35:45.000Z | 2020-01-09T02:35:45.000Z | # -*- coding: utf-8 -*-
"""Objects for User permission checks."""
from readthedocs.core.utils.extend import SettingsOverrideObject
class AdminPermissionBase:
    """Default permission checks: members of the object and superusers qualify."""

    @classmethod
    def is_admin(cls, user, project):
        """Whether ``user`` may administer ``project``."""
        members = project.users.all()
        return user in members or user.is_superuser

    @classmethod
    def is_member(cls, user, obj):
        """Whether ``user`` belongs to ``obj``."""
        members = obj.users.all()
        return user in members or user.is_superuser
class AdminPermission(SettingsOverrideObject):
    # Swappable permission class: the ADMIN_PERMISSION setting may point at
    # an alternative implementation; AdminPermissionBase is the fallback.
    _default_class = AdminPermissionBase
    _override_setting = 'ADMIN_PERMISSION'
| 24 | 64 | 0.729167 |
from readthedocs.core.utils.extend import SettingsOverrideObject
class AdminPermissionBase:
@classmethod
def is_admin(cls, user, project):
return user in project.users.all() or user.is_superuser
@classmethod
def is_member(cls, user, obj):
return user in obj.users.all() or user.is_superuser
class AdminPermission(SettingsOverrideObject):
_default_class = AdminPermissionBase
_override_setting = 'ADMIN_PERMISSION'
| true | true |
1c39817ceccba276386ffd7d3aeb8077d03f72dd | 12,588 | py | Python | wazimap/geo.py | CodeForAfricaLabs/wazimap | f3a756590f8067a48c5f3af7f848517053d859d8 | [
"MIT"
] | null | null | null | wazimap/geo.py | CodeForAfricaLabs/wazimap | f3a756590f8067a48c5f3af7f848517053d859d8 | [
"MIT"
] | 7 | 2018-08-21T12:38:30.000Z | 2019-02-26T06:21:54.000Z | wazimap/geo.py | CodeForAfricaLabs/wazimap | f3a756590f8067a48c5f3af7f848517053d859d8 | [
"MIT"
] | 1 | 2019-08-06T09:12:43.000Z | 2019-08-06T09:12:43.000Z | import os.path
import json
import logging
from itertools import chain
from django.conf import settings
from django.utils.module_loading import import_string
from django.db.models import Q
from django.contrib.staticfiles.storage import staticfiles_storage
from wazimap.models import Geography
log = logging.getLogger(__name__)
# GDAL is difficult to install, so we make it an optional dependency.
# Here, we check if it's installed and warn if it isn't.
try:
import osgeo.gdal # noqa
HAS_GDAL = True
except ImportError:
HAS_GDAL = False
class LocationNotFound(Exception):
    """Raised when a requested location cannot be found."""
    pass
class GeoData(object):
""" General Wazimap geography helper object.
This object helps Wazimap load geographies, navigate geo level hierarchies,
find locations, etc. It's a good place to override this functionality
if you want to use a different geometry setup.
To override behaviour, implement your own GeoData object (probably inheriting
from this one), then set the `WAZIMAP['geodata']` to the dotted path of your
new class in your `settings.py`. Wazimap will then load that class and make
it available as `wazimap.geo.geo_data`.
"""
_versions = None
def __init__(self):
    self.geo_model = Geography
    self.setup_levels()
    self.setup_geometry()

    # Version info is resolved lazily via _setup_versions, since it
    # requires querying the Geography model.
    self._default_version = None
    self._versions = None
    self._global_latest_version = None
def _setup_versions(self):
    """ Find all the geography versions.
    """
    # Distinct version strings present in the Geography table.
    self._versions = [x['version'] for x in self.geo_model.objects.values('version').distinct().all()]
    # Version strings sort lexicographically; the last is treated as latest.
    self._global_latest_version = sorted(self.versions)[-1]
    # _default_version = None means fall back to whatever is latest for geography
    self._default_version = settings.WAZIMAP['default_geo_version']
@property
def versions(self):
    # All known geography versions; loaded lazily on first access.
    if self._versions is None:
        self._setup_versions()
    return self._versions
@property
def global_latest_version(self):
    # Latest geography version across all geographies; loaded lazily.
    if self._global_latest_version is None:
        self._setup_versions()
    return self._global_latest_version
@property
def default_version(self):
    # Configured default geography version (may be None, meaning
    # "use whatever is latest for the geography"); loaded lazily.
    if self._default_version is None:
        self._setup_versions()
    return self._default_version
def setup_levels(self):
    """ Setup the summary level hierarchy from the `WAZIMAP['levels']` and
    `WAZIMAP['comparative_levels']` settings.
    """
    self.comparative_levels = ['this'] + settings.WAZIMAP['comparative_levels']
    self.geo_levels = settings.WAZIMAP['levels']

    # Map each level code to the list of levels that directly contain it.
    parents = {}
    for code, level in self.geo_levels.items():
        # Fill in optional level metadata with sensible defaults.
        level.setdefault('name', code)
        level.setdefault('plural', code + 's')
        level.setdefault('children', [])
        level['sumlev'] = code
        for kid in level['children']:
            parents.setdefault(kid, []).append(code)

    # fold in the ancestors
    def climb(code):
        # All transitive parents of `code` (may contain duplicates,
        # deduplicated by the set() below).
        return chain(parents.get(code, []), *[climb(p) for p in parents.get(code, [])])

    for code, items in parents.items():
        self.geo_levels[code]['ancestors'] = list(set(climb(code)))

    # root level: exactly one level must have no ancestors
    roots = [key for key, lev in self.geo_levels.items() if not lev.get('ancestors')]
    if not roots or len(roots) > 1:
        raise ValueError("geo_levels must have a single root item, but we found: %s" % roots)
    self.root_level = roots[0]
def setup_geometry(self):
    """ Load boundaries from geojson shape files.
    """
    # map from levels to a dict of geoid-keyed feature
    # objects, including their geometry as shapely shapes
    #
    # eg.
    #
    # {
    #   'province': {
    #     'GT': {
    #       'properties': { ... },
    #       'shape': <shapely shape>
    #     }
    #   }
    # }
    #
    # NOTE(review): as built below the mapping is actually keyed
    # version -> level -> code (see level_detail), one extra level deeper
    # than this example shows.
    self.geometry = {}
    self.geometry_files = settings.WAZIMAP.get('geometry_data', {})

    for level in self.geo_levels.keys():
        # sanity check for geo version
        # NOTE(review): operator precedence makes this
        #   level in files or (files.keys() == [''] and isinstance(...))
        # and both `basestring` and comparing .keys() to a list are
        # Python 2 idioms -- confirm this module still targets Python 2.
        if level in self.geometry_files or self.geometry_files.keys() == [''] and isinstance(self.geometry_files[''], basestring):
            # The geometry_data must include a version key. For example:
            #
            # geometry_data = {
            #     '2011': {
            #         'province': 'geo/2011/country.geojson',
            #         'country': 'geo/2011/country.geojson',
            #     }, {
            #     '2016': {
            #         'province': 'geo/2016/country.geojson',
            #         'country': 'geo/2016/country.geojson',
            #     }
            # }
            #
            # If you aren't using geo versioning, then use the default geo
            # version '' as the first key:
            #
            # geometry_data = {
            #     '': {
            #         'province': 'geo/2011/country.geojson',
            #         'country': 'geo/2011/country.geojson',
            #     }
            # }
            suggestion = {'': self.geometry_files}
            raise ValueError("The geometry_data setting is missing a geometry version key. You probably aren't using geometry versions just need to " +
                             "change WAZIMAP['geometry_data'] to be: %s" % suggestion)

        for version in self.geometry_files.keys():
            fname, js = self.load_geojson_for_level(level, version)
            if not js:
                continue

            if js['type'] != 'FeatureCollection':
                raise ValueError("GeoJSON files must contain a FeatureCollection. The file %s has type %s" % (fname, js['type']))

            # Nested dict keyed version -> level -> geo code.
            level_detail = self.geometry.setdefault(version, {}).setdefault(level, {})

            for feature in js['features']:
                props = feature['properties']
                shape = None
                if HAS_GDAL and feature['geometry']:
                    # Imported lazily so the module works without shapely/GDAL.
                    from shapely.geometry import asShape
                    try:
                        shape = asShape(feature['geometry'])
                    except ValueError as e:
                        # e.message is Python 2-only.
                        log.error("Error parsing geometry for %s-%s from %s: %s. Feature: %s"
                                  % (level, props['code'], fname, e.message, feature), exc_info=e)
                        raise e

                level_detail[props['code']] = {
                    'properties': props,
                    'shape': shape
                }
def load_geojson_for_level(self, level, version):
    """Load the GeoJSON file configured for +level+ at +version+.

    Returns a (filename, parsed-json) pair, or (None, None) when no file
    is configured or the file does not exist on disk.
    """
    files = self.geometry_files[version]
    # Fall back to the catch-all '' entry when the level has no file of its own.
    fname = files.get(level, files.get(''))
    if not fname:
        return None, None

    # we have to have geojson
    name, ext = os.path.splitext(fname)
    if ext != '.geojson':
        fname = name + '.geojson'
    # Resolve through Django's static files storage.
    fname = staticfiles_storage.path(fname)

    # try load it
    try:
        with open(fname, 'r') as f:
            return fname, json.load(f)
    except IOError as e:
        if e.errno == 2:
            # errno 2 == ENOENT: a missing file is non-fatal, just warn.
            log.warn("Couldn't open geometry file %s -- no geometry will be available for level %s and version '%s'" % (fname, level, version))
        else:
            raise e

    return None, None
def root_geography(self, version=None):
    """Return the first geography with no parents (the hierarchy root)."""
    qs = self.geo_model.objects.filter(
        parent_level=None, parent_code=None, geo_level=self.root_level)

    wanted = self.default_version if version is None else version
    if wanted is None:
        # No version preference anywhere: prefer the most recent one.
        qs = qs.order_by("-version")
    else:
        qs = qs.filter(version=wanted)

    return qs.first()
def get_geography(self, geo_code, geo_level, version=None):
    """Fetch one geography, raising LocationNotFound when it doesn't exist.

    Without an explicit +version+ the configured default version is used;
    when that is also unset, the newest version is taken.
    """
    qs = self.geo_model.objects.filter(geo_level=geo_level, geo_code=geo_code)

    if version is None:
        version = self.default_version
    if version is None:
        # No version known at all: newest wins.
        qs = qs.order_by("-version")
    else:
        qs = qs.filter(version=version)

    match = qs.first()
    if not match:
        raise LocationNotFound("Invalid level, code and version: %s-%s '%s'" % (geo_level, geo_code, version))
    return match
def get_geometry(self, geo):
    """Return the geometry description for a geography, or None.

    The result is a dict with two keys: 'properties' (a dict) and
    'shape' (a shapely shape, possibly None).
    """
    per_version = self.geometry.get(geo.version, {})
    per_level = per_version.get(geo.geo_level, {})
    return per_level.get(geo.geo_code)
def get_locations(self, search_term, levels=None, version=None):
    """Search geographies by name substring or exact upper-cased code.

    Optionally limited to +levels+. Returns up to 10 geo models sorted by
    level, name and code.
    """
    term = search_term.strip()
    matches = self.geo_model.objects\
        .filter(Q(name__icontains=term) |
                Q(geo_code=term.upper()))

    if version is None:
        version = self.default_version
    if version is None:
        version = self.global_latest_version
    # NOTE(review): the resolved +version+ is never applied to the query
    # here, so results span all versions -- confirm whether that's intended.

    if levels:
        matches = matches.filter(geo_level__in=levels)

    # TODO: order by level?
    return sorted(matches[:10], key=lambda g: [g.geo_level, g.name, g.geo_code])
def get_locations_from_coords(self, longitude, latitude, levels=None, version=None):
    """
    Returns a list of geographies containing this point.
    """
    if not HAS_GDAL:
        gdal_missing(critical=True)

    from shapely.geometry import Point
    p = Point(float(longitude), float(latitude))
    geos = []

    if version is None:
        version = self.default_version
    if version is None:
        version = self.global_latest_version

    # self.geometry is keyed version -> level -> code (see setup_geometry and
    # get_geometry). The previous code iterated only two dict levels deep, so
    # `feature` was a per-level dict and `feature['shape']` raised KeyError.
    # Walk all three levels; matches are then resolved at the requested
    # +version+, preserving the original cross-version scan.
    for levels_detail in self.geometry.itervalues():
        for features in levels_detail.itervalues():
            for feature in features.itervalues():
                if feature['shape'] and feature['shape'].contains(p):
                    geo = self.get_geography(feature['properties']['code'],
                                             feature['properties']['level'],
                                             version)
                    if not levels or geo.geo_level in levels:
                        geos.append(geo)

    return geos
def get_summary_geo_info(self, geo):
    """List (level, code) tuples this geography should be compared against.

    This is the intersection of +comparative_levels+ with the geography's
    ancestors, in comparative-level order.
    """
    by_level = {}
    for ancestor in geo.ancestors():
        by_level[ancestor.geo_level] = ancestor

    info = []
    for level in self.comparative_levels:
        if level in by_level:
            info.append((level, by_level[level].geo_code))
    return info
def get_comparative_geos(self, geo):
    """Resolve the comparison (level, code) pairs for +geo+ to geo models."""
    comparisons = []
    for level, code in self.get_summary_geo_info(geo):
        comparisons.append(self.get_geography(code, level, geo.version))
    return comparisons
def first_child_level(self):
    """Return the first level directly below the root of the hierarchy."""
    root = self.geo_levels[self.root_level]
    return root['children'][0]
def primary_release_year(self, geo):
    """Primary release year for this geography's level.

    Looked up in the `WAZIMAP['primary_release_year']` setting by geo
    level; defaults to `latest` when the level has no entry.
    """
    year_by_level = settings.WAZIMAP['primary_release_year']
    return year_by_level.get(geo.geo_level, 'latest')
# Module-level singleton: the geo data class named (as a dotted path) by the
# WAZIMAP['geodata'] setting is imported and instantiated at import time.
geo_data = import_string(settings.WAZIMAP['geodata'])()
def gdal_missing(critical=False):
    """Warn that GDAL could not be loaded; raise when +critical+ is True."""
    log.warn("NOTE: Wazimap is unable to load GDAL, it's probably not installed. "
             "Some functionality such as data downloads and geolocation won't work. This is ok in development, but "
             "is a problem in production. For more information on installing GDAL, see http://wazimap.readthedocs.io/en/latest/")
    if not critical:
        return
    raise Exception("GDAL must be installed for this functionality to work.")
| 37.688623 | 155 | 0.582936 | import os.path
import json
import logging
from itertools import chain
from django.conf import settings
from django.utils.module_loading import import_string
from django.db.models import Q
from django.contrib.staticfiles.storage import staticfiles_storage
from wazimap.models import Geography
log = logging.getLogger(__name__)
try:
import osgeo.gdal
HAS_GDAL = True
except ImportError:
HAS_GDAL = False
class LocationNotFound(Exception):
pass
class GeoData(object):
_versions = None
def __init__(self):
self.geo_model = Geography
self.setup_levels()
self.setup_geometry()
self._default_version = None
self._versions = None
self._global_latest_version = None
def _setup_versions(self):
self._versions = [x['version'] for x in self.geo_model.objects.values('version').distinct().all()]
self._global_latest_version = sorted(self.versions)[-1]
self._default_version = settings.WAZIMAP['default_geo_version']
@property
def versions(self):
if self._versions is None:
self._setup_versions()
return self._versions
@property
def global_latest_version(self):
if self._global_latest_version is None:
self._setup_versions()
return self._global_latest_version
@property
def default_version(self):
if self._default_version is None:
self._setup_versions()
return self._default_version
def setup_levels(self):
self.comparative_levels = ['this'] + settings.WAZIMAP['comparative_levels']
self.geo_levels = settings.WAZIMAP['levels']
parents = {}
for code, level in self.geo_levels.items():
level.setdefault('name', code)
level.setdefault('plural', code + 's')
level.setdefault('children', [])
level['sumlev'] = code
for kid in level['children']:
parents.setdefault(kid, []).append(code)
def climb(code):
return chain(parents.get(code, []), *[climb(p) for p in parents.get(code, [])])
for code, items in parents.items():
self.geo_levels[code]['ancestors'] = list(set(climb(code)))
roots = [key for key, lev in self.geo_levels.items() if not lev.get('ancestors')]
if not roots or len(roots) > 1:
raise ValueError("geo_levels must have a single root item, but we found: %s" % roots)
self.root_level = roots[0]
def setup_geometry(self):
self.geometry = {}
self.geometry_files = settings.WAZIMAP.get('geometry_data', {})
for level in self.geo_levels.keys():
if level in self.geometry_files or self.geometry_files.keys() == [''] and isinstance(self.geometry_files[''], basestring):
# version '' as the first key:
#
# geometry_data = {
# '': {
# 'province': 'geo/2011/country.geojson',
# 'country': 'geo/2011/country.geojson',
# }
# }
suggestion = {'': self.geometry_files}
raise ValueError("The geometry_data setting is missing a geometry version key. You probably aren't using geometry versions just need to " +
"change WAZIMAP['geometry_data'] to be: %s" % suggestion)
for version in self.geometry_files.keys():
fname, js = self.load_geojson_for_level(level, version)
if not js:
continue
if js['type'] != 'FeatureCollection':
raise ValueError("GeoJSON files must contain a FeatureCollection. The file %s has type %s" % (fname, js['type']))
level_detail = self.geometry.setdefault(version, {}).setdefault(level, {})
for feature in js['features']:
props = feature['properties']
shape = None
if HAS_GDAL and feature['geometry']:
from shapely.geometry import asShape
try:
shape = asShape(feature['geometry'])
except ValueError as e:
log.error("Error parsing geometry for %s-%s from %s: %s. Feature: %s"
% (level, props['code'], fname, e.message, feature), exc_info=e)
raise e
level_detail[props['code']] = {
'properties': props,
'shape': shape
}
def load_geojson_for_level(self, level, version):
files = self.geometry_files[version]
fname = files.get(level, files.get(''))
if not fname:
return None, None
name, ext = os.path.splitext(fname)
if ext != '.geojson':
fname = name + '.geojson'
fname = staticfiles_storage.path(fname)
try:
with open(fname, 'r') as f:
return fname, json.load(f)
except IOError as e:
if e.errno == 2:
log.warn("Couldn't open geometry file %s -- no geometry will be available for level %s and version '%s'" % (fname, level, version))
else:
raise e
return None, None
def root_geography(self, version=None):
query = self.geo_model.objects.filter(parent_level=None, parent_code=None, geo_level=self.root_level)
if version is None:
version = self.default_version
if version is None:
query = query.order_by("-version")
else:
query = query.filter(version=version)
return query.first()
def get_geography(self, geo_code, geo_level, version=None):
query = self.geo_model.objects.filter(geo_level=geo_level, geo_code=geo_code)
if version is None:
version = self.default_version
if version is None:
query = query.order_by("-version")
else:
query = query.filter(version=version)
geo = query.first()
if not geo:
raise LocationNotFound("Invalid level, code and version: %s-%s '%s'" % (geo_level, geo_code, version))
return geo
def get_geometry(self, geo):
return self.geometry.get(geo.version, {}).get(geo.geo_level, {}).get(geo.geo_code)
def get_locations(self, search_term, levels=None, version=None):
search_term = search_term.strip()
query = self.geo_model.objects\
.filter(Q(name__icontains=search_term) |
Q(geo_code=search_term.upper()))
if version is None:
version = self.default_version
if version is None:
version = self.global_latest_version
if levels:
query = query.filter(geo_level__in=levels)
# TODO: order by level?
objects = sorted(query[:10], key=lambda o: [o.geo_level, o.name, o.geo_code])
return objects
def get_locations_from_coords(self, longitude, latitude, levels=None, version=None):
if not HAS_GDAL:
gdal_missing(critical=True)
from shapely.geometry import Point
p = Point(float(longitude), float(latitude))
geos = []
if version is None:
version = self.default_version
if version is None:
version = self.global_latest_version
for features in self.geometry.itervalues():
for feature in features.itervalues():
if feature['shape'] and feature['shape'].contains(p):
geo = self.get_geography(feature['properties']['code'],
feature['properties']['level'],
version)
if not levels or geo.geo_level in levels:
geos.append(geo)
return geos
def get_summary_geo_info(self, geo):
ancestors = {g.geo_level: g for g in geo.ancestors()}
return [(lev, ancestors[lev].geo_code) for lev in self.comparative_levels if lev in ancestors]
def get_comparative_geos(self, geo):
return [self.get_geography(code, level, geo.version) for level, code in self.get_summary_geo_info(geo)]
def first_child_level(self):
# first child level in the hierarchy
return self.geo_levels[self.root_level]['children'][0]
def primary_release_year(self, geo):
return settings.WAZIMAP['primary_release_year'].get(geo.geo_level, 'latest')
geo_data = import_string(settings.WAZIMAP['geodata'])()
def gdal_missing(critical=False):
log.warn("NOTE: Wazimap is unable to load GDAL, it's probably not installed. "
"Some functionality such as data downloads and geolocation won't work. This is ok in development, but "
"is a problem in production. For more information on installing GDAL, see http://wazimap.readthedocs.io/en/latest/")
if critical:
raise Exception("GDAL must be installed for this functionality to work.")
| true | true |
1c3981d57b8b5192e94ebcd87abd51f7a5b55f8d | 1,289 | py | Python | autorooms/bot.py | alanissak320/autorooms | a4ff150ba263382e0bfc9849bed847f862ff996c | [
"MIT"
] | null | null | null | autorooms/bot.py | alanissak320/autorooms | a4ff150ba263382e0bfc9849bed847f862ff996c | [
"MIT"
] | null | null | null | autorooms/bot.py | alanissak320/autorooms | a4ff150ba263382e0bfc9849bed847f862ff996c | [
"MIT"
] | 1 | 2020-06-09T16:29:08.000Z | 2020-06-09T16:29:08.000Z | import logging
import discord
from discord.ext import commands
from discord.voice_client import VoiceClient
VoiceClient.warn_nacl = False
class ARBot(commands.AutoShardedBot):
    """
    Autorooms bot
    """

    def __init__(self, *args, initial_exts: tuple = None, **kwargs):
        # Guard flag for the one-time setup in on_ready().
        # NOTE(review): nothing in this class ever assigns a non-None value
        # to self.uptime, so the early return in on_ready() only works if an
        # extension sets it -- TODO confirm.
        self.uptime = None
        # Extensions to load on first ready; overridable via initial_exts.
        self.initial_extensions = initial_exts or (
            "autorooms.extensions.autorooms",
            "autorooms.extensions.info",
        )
        # OAuth2 invite URL; filled in once application info is available.
        self.invite_link = None
        super().__init__(*args, command_prefix=commands.when_mentioned, **kwargs)

    async def on_ready(self):
        # Run the one-time setup only once, even across gateway reconnects.
        if self.uptime is not None:
            return

        for extension in self.initial_extensions:
            try:
                self.load_extension(extension)
            except discord.ClientException as e:
                # A single broken extension shouldn't stop the others.
                logging.exception(e)

        await self.change_presence(
            status=discord.Status.online,
            activity=discord.Game(name='mention me with "help" for help'),
        )

        data = await self.application_info()
        # 16796688 is the packed permission bitmask this bot requires.
        perms = discord.Permissions(permissions=16796688)
        self.invite_link = discord.utils.oauth_url(data.id, permissions=perms)
        print(f"Use this link to add the bot to your server: {self.invite_link}")
| 30.690476 | 81 | 0.643134 | import logging
import discord
from discord.ext import commands
from discord.voice_client import VoiceClient
VoiceClient.warn_nacl = False
class ARBot(commands.AutoShardedBot):
def __init__(self, *args, initial_exts: tuple = None, **kwargs):
self.uptime = None
self.initial_extensions = initial_exts or (
"autorooms.extensions.autorooms",
"autorooms.extensions.info",
)
self.invite_link = None
super().__init__(*args, command_prefix=commands.when_mentioned, **kwargs)
async def on_ready(self):
if self.uptime is not None:
return
for extension in self.initial_extensions:
try:
self.load_extension(extension)
except discord.ClientException as e:
logging.exception(e)
await self.change_presence(
status=discord.Status.online,
activity=discord.Game(name='mention me with "help" for help'),
)
data = await self.application_info()
perms = discord.Permissions(permissions=16796688)
self.invite_link = discord.utils.oauth_url(data.id, permissions=perms)
print(f"Use this link to add the bot to your server: {self.invite_link}")
| true | true |
1c3981ecbc3eb2c8ace42b9728f0a7df9a138614 | 237,626 | py | Python | tensorflow/python/ops/array_ops.py | mathemakitten/tensorflow | e62a6a8be2f9cfb79913bdb64f99efb5e88df0df | [
"Apache-2.0"
] | 1 | 2021-03-09T04:12:46.000Z | 2021-03-09T04:12:46.000Z | tensorflow/python/ops/array_ops.py | mathemakitten/tensorflow | e62a6a8be2f9cfb79913bdb64f99efb5e88df0df | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/array_ops.py | mathemakitten/tensorflow | e62a6a8be2f9cfb79913bdb64f99efb5e88df0df | [
"Apache-2.0"
] | 1 | 2021-10-15T06:37:59.000Z | 2021-10-15T06:37:59.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import
from tensorflow.python.types import core
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# We override the 'slice' for the "slice" op, so we keep Python's
# existing 'slice' for later use in this module.
_BaseSlice = slice
@tf_export("reshape", v1=["reshape", "manip.reshape"])
@dispatch.add_dispatch_support
def reshape(tensor, shape, name=None):  # pylint: disable=redefined-outer-name
  r"""Reshapes a tensor.

  Given `tensor`, this operation returns a new `tf.Tensor` that has the same
  values as `tensor` in the same order, except with a new shape given by
  `shape`.

  >>> t1 = [[1, 2, 3],
  ...       [4, 5, 6]]
  >>> print(tf.shape(t1).numpy())
  [2 3]
  >>> t2 = tf.reshape(t1, [6])
  >>> t2
  <tf.Tensor: shape=(6,), dtype=int32,
    numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
  >>> tf.reshape(t2, [3, 2])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
    array([[1, 2],
           [3, 4],
           [5, 6]], dtype=int32)>

  The `tf.reshape` does not change the order of or the total number of elements
  in the tensor, and so it can reuse the underlying data buffer. This makes it
  a fast operation independent of how big of a tensor it is operating on.

  >>> tf.reshape([1, 2, 3], [2, 2])
  Traceback (most recent call last):
  ...
  InvalidArgumentError: Input to reshape is a tensor with 3 values, but the
  requested shape has 4

  To instead reorder the data to rearrange the dimensions of a tensor, see
  `tf.transpose`.

  >>> t = [[1, 2, 3],
  ...      [4, 5, 6]]
  >>> tf.reshape(t, [3, 2]).numpy()
  array([[1, 2],
         [3, 4],
         [5, 6]], dtype=int32)
  >>> tf.transpose(t, perm=[1, 0]).numpy()
  array([[1, 4],
         [2, 5],
         [3, 6]], dtype=int32)

  If one component of `shape` is the special value -1, the size of that
  dimension is computed so that the total size remains constant. In particular,
  a `shape` of `[-1]` flattens into 1-D. At most one component of `shape` can
  be -1.

  >>> t = [[1, 2, 3],
  ...      [4, 5, 6]]
  >>> tf.reshape(t, [-1])
  <tf.Tensor: shape=(6,), dtype=int32,
    numpy=array([1, 2, 3, 4, 5, 6], dtype=int32)>
  >>> tf.reshape(t, [3, -1])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
    array([[1, 2],
           [3, 4],
           [5, 6]], dtype=int32)>
  >>> tf.reshape(t, [-1, 2])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
    array([[1, 2],
           [3, 4],
           [5, 6]], dtype=int32)>

  `tf.reshape(t, [])` reshapes a tensor `t` with one element to a scalar.

  >>> tf.reshape([7], []).numpy()
  7

  More examples:

  >>> t = [1, 2, 3, 4, 5, 6, 7, 8, 9]
  >>> print(tf.shape(t).numpy())
  [9]
  >>> tf.reshape(t, [3, 3])
  <tf.Tensor: shape=(3, 3), dtype=int32, numpy=
    array([[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]], dtype=int32)>

  >>> t = [[[1, 1], [2, 2]],
  ...      [[3, 3], [4, 4]]]
  >>> print(tf.shape(t).numpy())
  [2 2 2]
  >>> tf.reshape(t, [2, 4])
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
    array([[1, 1, 2, 2],
           [3, 3, 4, 4]], dtype=int32)>

  >>> t = [[[1, 1, 1],
  ...       [2, 2, 2]],
  ...      [[3, 3, 3],
  ...       [4, 4, 4]],
  ...      [[5, 5, 5],
  ...       [6, 6, 6]]]
  >>> print(tf.shape(t).numpy())
  [3 2 3]
  >>> # Pass '[-1]' to flatten 't'.
  >>> tf.reshape(t, [-1])
  <tf.Tensor: shape=(18,), dtype=int32,
    numpy=array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],
    dtype=int32)>

  >>> # -- Using -1 to infer the shape --
  >>> # Here -1 is inferred to be 9:
  >>> tf.reshape(t, [2, -1])
  <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
    array([[1, 1, 1, 2, 2, 2, 3, 3, 3],
           [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>
  >>> # -1 is inferred to be 2:
  >>> tf.reshape(t, [-1, 9])
  <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
    array([[1, 1, 1, 2, 2, 2, 3, 3, 3],
           [4, 4, 4, 5, 5, 5, 6, 6, 6]], dtype=int32)>
  >>> # -1 is inferred to be 3:
  >>> tf.reshape(t, [ 2, -1, 3])
  <tf.Tensor: shape=(2, 3, 3), dtype=int32, numpy=
    array([[[1, 1, 1],
            [2, 2, 2],
            [3, 3, 3]],
           [[4, 4, 4],
            [5, 5, 5],
            [6, 6, 6]]], dtype=int32)>

  Args:
    tensor: A `Tensor`.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Defines the shape of the output tensor.
    name: Optional string. A name for the operation.

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  result = gen_array_ops.reshape(tensor, shape, name)
  # When `shape` is statically known, record it on the result so graph
  # building gets a fully-defined static shape.
  tensor_util.maybe_set_static_shape(result, shape)
  return result
@tf_export("fill")
@dispatch.add_dispatch_support
def fill(dims, value, name=None):
  r"""Creates a tensor filled with a scalar value.

  See also `tf.ones`, `tf.zeros`, `tf.one_hot`, `tf.eye`.

  This operation creates a tensor of shape `dims` and fills it with `value`.

  For example:

  >>> tf.fill([2, 3], 9)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[9, 9, 9],
         [9, 9, 9]], dtype=int32)>

  `tf.fill` evaluates at graph runtime and supports dynamic shapes based on
  other runtime `tf.Tensors`, unlike `tf.constant(value, shape=dims)`, which
  embeds the value as a `Const` node.

  Args:
    dims: A 1-D sequence of non-negative numbers. Represents the shape of the
      output `tf.Tensor`. Entries should be of type: `int32`, `int64`.
    value: A value to fill the returned `tf.Tensor`.
    name: Optional string. The name of the output `tf.Tensor`.

  Returns:
    A `tf.Tensor` with shape `dims` and the same dtype as `value`.

  Raises:
    InvalidArgumentError: `dims` contains negative entries.
    NotFoundError: `dims` contains non-integer entries.

  @compatibility(numpy)
  Similar to `np.full`. In `numpy`, more parameters are supported. Passing a
  number argument as the shape (`np.full(5, value)`) is valid in `numpy` for
  specifying a 1-D shaped result, while TensorFlow does not support this syntax.
  @end_compatibility
  """
  result = gen_array_ops.fill(dims, value, name=name)
  # Propagate a statically-known `dims` onto the result's static shape.
  tensor_util.maybe_set_static_shape(result, dims)
  return result
@tf_export("identity")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def identity(input, name=None):  # pylint: disable=redefined-builtin
  r"""Return a Tensor with the same shape and contents as input.

  The return value is not the same Tensor as the original, but contains the same
  values. This operation is fast when used on the same device.

  For example:

  >>> a = tf.constant([0.78])
  >>> a_identity = tf.identity(a)
  >>> a.numpy()
  array([0.78], dtype=float32)
  >>> a_identity.numpy()
  array([0.78], dtype=float32)

  Calling `tf.identity` on a variable will make a Tensor that represents the
  value of that variable at the time it is called. This is equivalent to calling
  `<variable>.read_value()`.

  >>> a = tf.Variable(5)
  >>> a_identity = tf.identity(a)
  >>> a.assign_add(1)
  <tf.Variable ... shape=() dtype=int32, numpy=6>
  >>> a.numpy()
  6
  >>> a_identity.numpy()
  5

  Args:
    input: A `Tensor`, a `Variable`, a `CompositeTensor` or anything that can be
      converted to a tensor using `tf.convert_to_tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or CompositeTensor. Has the same type and contents as `input`.
  """
  # Composite tensors (e.g. sparse/ragged) are handled by applying identity
  # to each of their component tensors.
  if isinstance(input, composite_tensor.CompositeTensor):
    return nest.map_structure(identity, input, expand_composites=True)
  if context.executing_eagerly() and not hasattr(input, "graph"):
    # Make sure we get an input with handle data attached from resource
    # variables. Variables have correct handle data when graph building.
    input = ops.convert_to_tensor(input)
  ret = gen_array_ops.identity(input, name=name)
  # Propagate handle data for happier shape inference for resource variables.
  if hasattr(input, "_handle_data"):
    ret._handle_data = input._handle_data  # pylint: disable=protected-access
  return ret
# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
  """Returns a tensor with a length 1 axis inserted at index `axis`.

  Given a tensor `input`, this operation inserts a dimension of length 1 at the
  dimension index `axis` of `input`'s shape. The dimension index follows Python
  indexing rules: It's zero-based, a negative index it is counted backward
  from the end.

  This operation is useful to:

  * Add an outer "batch" dimension to a single element.
  * Align axes for broadcasting.
  * To add an inner vector length axis to a tensor of scalars.

  For example:

  If you have a single image of shape `[height, width, channels]`:

  >>> image = tf.zeros([10,10,3])

  You can add an outer `batch` axis by passing `axis=0`:

  >>> tf.expand_dims(image, axis=0).shape.as_list()
  [1, 10, 10, 3]

  The new axis location matches Python `list.insert(axis, 1)`:

  >>> tf.expand_dims(image, axis=1).shape.as_list()
  [10, 1, 10, 3]

  Following standard Python indexing rules, a negative `axis` counts from the
  end so `axis=-1` adds an inner most dimension:

  >>> tf.expand_dims(image, -1).shape.as_list()
  [10, 10, 3, 1]

  This operation requires that `axis` is a valid index for `input.shape`,
  following Python indexing rules:

  ```
  -1-tf.rank(input) <= axis <= tf.rank(input)
  ```

  This operation is related to:

  * `tf.squeeze`, which removes dimensions of size 1.
  * `tf.reshape`, which provides more flexible reshaping capability.
  * `tf.sparse.expand_dims`, which provides this functionality for
    `tf.SparseTensor`

  Args:
    input: A `Tensor`.
    axis: 0-D (scalar). Specifies the dimension index at which to expand the
      shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`.
    name: The name of the output `Tensor` (optional).
    dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.

  Returns:
    A `Tensor` with the same data as `input`, but its shape has an additional
    dimension of size 1 added.

  Raises:
    ValueError: if either both or neither of `dim` and `axis` are specified.
  """
  # Resolve the deprecated `dim` alias into `axis`; raises if both are given.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  if axis is None:
    raise ValueError("Must specify an axis argument to tf.expand_dims()")
  return expand_dims_v2(input, axis, name)
@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
  """Returns a tensor with a length 1 axis inserted at index `axis`.

  Given a tensor `input`, this operation inserts a dimension of length 1 at the
  dimension index `axis` of `input`'s shape. The dimension index follows Python
  indexing rules: It's zero-based, a negative index it is counted backward
  from the end.

  This operation is useful to:

  * Add an outer "batch" dimension to a single element.
  * Align axes for broadcasting.
  * To add an inner vector length axis to a tensor of scalars.

  For example:

  If you have a single image of shape `[height, width, channels]`:

  >>> image = tf.zeros([10,10,3])

  You can add an outer `batch` axis by passing `axis=0`:

  >>> tf.expand_dims(image, axis=0).shape.as_list()
  [1, 10, 10, 3]

  The new axis location matches Python `list.insert(axis, 1)`:

  >>> tf.expand_dims(image, axis=1).shape.as_list()
  [10, 1, 10, 3]

  Following standard Python indexing rules, a negative `axis` counts from the
  end so `axis=-1` adds an inner most dimension:

  >>> tf.expand_dims(image, -1).shape.as_list()
  [10, 10, 3, 1]

  This operation requires that `axis` is a valid index for `input.shape`,
  following Python indexing rules:

  ```
  -1-tf.rank(input) <= axis <= tf.rank(input)
  ```

  This operation is related to:

  * `tf.squeeze`, which removes dimensions of size 1.
  * `tf.reshape`, which provides more flexible reshaping capability.
  * `tf.sparse.expand_dims`, which provides this functionality for
    `tf.SparseTensor`

  Args:
    input: A `Tensor`.
    axis: Integer specifying the dimension index at which to expand the
      shape of `input`. Given an input of D dimensions, `axis` must be in range
      `[-(D+1), D]` (inclusive).
    name: Optional string. The name of the output `Tensor`.

  Returns:
    A tensor with the same data as `input`, with an additional dimension
    inserted at the index specified by `axis`.

  Raises:
    TypeError: If `axis` is not specified.
    InvalidArgumentError: If `axis` is out of range `[-(D+1), D]`.
  """
  # Thin wrapper: validation happens inside the generated op kernel.
  return gen_array_ops.expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated("2016-11-30",
                        "This op will be removed after the deprecation date. "
                        "Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
  # Deprecated thin alias around the generated list_diff op.
  return gen_array_ops.list_diff(x, y, out_idx=out_idx, name=name)


# Prepend the generated op's documentation to the deprecation notice that the
# decorator installed as this function's docstring.
listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable
@deprecation.deprecated("2018-11-30",
                        "This op will be removed after the deprecation date. "
                        "Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
@dispatch.add_dispatch_support
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
  """Computes the difference between two lists of numbers or strings.

  Given a list x and a list y, this operation returns a list out that
  represents all values that are in x but not in y. The returned list
  out is sorted in the same order that the numbers appear in x
  (duplicates are preserved). This operation also returns a list idx
  that represents the position of each out element in x.

  In other words:

  ```python
  out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]
  ```

  Example usage:

  >>> x = [1, 2, 3, 4, 5, 6]
  >>> y = [1, 3, 5]
  >>> setdiff1d(x,y)
  ListDiff(out=<tf.Tensor: id=2, shape=(3,), dtype=int32,
  numpy=array([2, 4, 6], dtype=int32)>, idx=<tf.Tensor: id=3,
  shape=(3,), dtype=int32, numpy=array([1, 3, 5], dtype=int32)>)

  Args:
    x: A Tensor. 1-D. Values to keep.
    y: A Tensor. Must have the same type as x. 1-D. Values to remove.
    index_dtype: An optional tf.DType from: tf.int32, tf.int64. Defaults to
      tf.int32. Controls the dtype of the returned idx tensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of Tensor objects (out, idx).

    out: A Tensor. Has the same type as x.
    idx: A Tensor of type index_dtype.
  """
  return gen_array_ops.list_diff(x, y, index_dtype, name)


# NOTE: this deliberately replaces the docstring above with the generated
# op's documentation at import time.
setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
@tf_export("broadcast_dynamic_shape")
@dispatch.add_dispatch_support
def broadcast_dynamic_shape(shape_x, shape_y):
  """Returns the shape produced by broadcasting two symbolic shapes.

  Given `shape_x` and `shape_y`, rank-1 integer tensors that describe shapes
  (typically the result of calling `tf.shape` on other tensors), this returns
  a rank-1 integer tensor holding the shape of the result of broadcasting
  tensors of shapes `shape_x` and `shape_y` together.

  This is useful for validating a broadcasting operation when the tensors do
  not have statically known shapes.

  Example:

  >>> shape_x = (1, 2, 3)
  >>> shape_y = (5, 1, 3)
  >>> tf.broadcast_dynamic_shape(shape_x, shape_y)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([5, 2, 3], ...>

  Args:
    shape_x: A rank 1 integer `Tensor`, representing the shape of x.
    shape_y: A rank 1 integer `Tensor`, representing the shape of y.

  Returns:
    A rank 1 integer `Tensor` representing the broadcasted shape.

  Raises:
    InvalidArgumentError: If the two shapes are incompatible for
    broadcasting.
  """
  # The generated BroadcastArgs kernel performs the compatibility check and
  # raises InvalidArgumentError when the shapes cannot be broadcast.
  broadcasted = gen_array_ops.broadcast_args(shape_x, shape_y)
  return broadcasted
@tf_export("broadcast_static_shape")
@dispatch.add_dispatch_support
def broadcast_static_shape(shape_x, shape_y):
  """Returns the shape produced by broadcasting two known static shapes.

  Given two fully known `TensorShape`s, this computes the `TensorShape` of
  the result of a broadcasting op applied to tensors of shapes `shape_x` and
  `shape_y`. For example, broadcasting `TensorShape([1, 2, 3])` with
  `TensorShape([5, 1, 3])` yields `TensorShape([5, 2, 3])`.

  This is useful for validating a broadcasting operation when the tensors
  have statically known shapes.

  Example:

  >>> shape_x = tf.TensorShape([1, 2, 3])
  >>> shape_y = tf.TensorShape([5, 1 ,3])
  >>> tf.broadcast_static_shape(shape_x, shape_y)
  TensorShape([5, 2, 3])

  Args:
    shape_x: A `TensorShape`
    shape_y: A `TensorShape`

  Returns:
    A `TensorShape` representing the broadcasted shape.

  Raises:
    ValueError: If the two shapes can not be broadcasted.
  """
  # Pure static computation; no ops are added to the graph.
  return common_shapes.broadcast_shape(shape_x, shape_y)
@tf_export("shape", v1=[])
@dispatch.add_dispatch_support
def shape_v2(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  """Returns a tensor containing the shape of the input tensor.

  See also `tf.size`, `tf.rank`.

  `tf.shape` returns a 1-D integer tensor representing the shape of `input`.
  For a scalar input the result has shape `(0,)` and value `[]` (the empty
  vector).

  For example:

  >>> tf.shape(1.)
  <tf.Tensor: shape=(0,), dtype=int32, numpy=array([], dtype=int32)>

  >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  >>> tf.shape(t)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([2, 2, 3], dtype=int32)>

  Note: When using symbolic tensors, such as when using the Keras API,
  tf.shape() will return the shape of the symbolic tensor.

  >>> a = tf.keras.layers.Input((None, 10))
  >>> tf.shape(a)
  <... shape=(3,) dtype=int32...>

  In these cases, using `tf.Tensor.shape` will return more informative
  results (e.g. `TensorShape([None, None, 10])`, where the first `None` is
  the as yet unknown batch size). `tf.shape` and `Tensor.shape` are
  identical in eager mode; within `tf.function` or a `compat.v1` context,
  not all dimensions may be known until execution time, so for graph-mode
  layers and models prefer the dynamic `tf.shape(x)` over the static
  `x.shape`.

  Args:
    input: A `Tensor` or `SparseTensor`.
    out_type: (Optional) The specified output type of the operation (`int32`
      or `int64`). Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  # Delegate to the v1 endpoint; only the argument order differs.
  return shape(input, name=name, out_type=out_type)
@tf_export(v1=["shape"])
@dispatch.add_dispatch_support
def shape(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  This operation returns a 1-D integer tensor representing the shape of
  `input`.

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.shape(t)  # [2, 2, 3]
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified output type of the operation (`int32`
      or `int64`). Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`.
  """
  # `optimize=True` lets fully-defined static shapes fold to a constant.
  return shape_internal(input, name=name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the shape of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the shape as a constant when possible.
    out_type: (Optional) The specified output type of the operation (`int32`
      or `int64`). Defaults to tf.int32.

  Returns:
    A `Tensor` of type `out_type`.
  """
  with ops.name_scope(name, "Shape", [input]) as name:
    sparse_types = (sparse_tensor.SparseTensor,
                    sparse_tensor.SparseTensorValue)
    if isinstance(input, sparse_types):
      # Sparse tensors carry their dense shape explicitly; just cast it.
      return gen_math_ops.cast(input.dense_shape, out_type)
    if not context.executing_eagerly():
      input = ops.convert_to_tensor(input)
      static_shape = input.get_shape()
      # When the static shape is fully known, fold it into a constant
      # instead of adding a Shape op to the graph.
      if optimize and static_shape.is_fully_defined():
        return constant(static_shape.as_list(), out_type, name=name)
    return gen_array_ops.shape(input, name=name, out_type=out_type)
@tf_export("shape_n")
@dispatch.add_dispatch_support
def shape_n(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  """Returns the shapes of several tensors at once.

  Args:
    input: A list of at least 1 `Tensor` object with the same type.
    out_type: The specified output type of the operation (`int32` or
      `int64`). Defaults to `tf.int32`(optional).
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `input` of `Tensor` objects with
    type `out_type`.
  """
  shapes = gen_array_ops.shape_n(input, out_type=out_type, name=name)
  return shapes
@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
  # pylint: disable=redefined-builtin
  """Returns the size of a tensor.

  See also `tf.shape`.

  Returns a 0-D `Tensor` representing the number of elements in `input`
  of type `out_type`. Defaults to tf.int32.

  For example:

  >>> t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  >>> tf.size(t)
  <tf.Tensor: shape=(), dtype=int32, numpy=12>

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified non-quantized numeric output type of
      the operation. Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`. Defaults to `tf.int32`.

  @compatibility(numpy)
  Equivalent to np.size()
  @end_compatibility
  """
  # Delegate to the v1 endpoint; only the argument order differs.
  return size(input, name=name, out_type=out_type)
@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin
  """Returns the size of a tensor.

  Returns a 0-D `Tensor` representing the number of elements in `input`
  of type `out_type`. Defaults to tf.int32.

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.size(t)  # 12
  ```

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    out_type: (Optional) The specified non-quantized numeric output type of
      the operation. Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`. Defaults to `tf.int32`.

  @compatibility(numpy)
  Equivalent to np.size()
  @end_compatibility
  """
  # `optimize=True` lets statically-known sizes fold to a constant.
  return size_internal(input, name=name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  # pylint: disable=redefined-builtin,protected-access
  """Returns the size of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the size as a constant when possible.
    out_type: (Optional) The specified non-quantized numeric output type of
      the operation. Defaults to `tf.int32`.

  Returns:
    A `Tensor` of type `out_type`. Defaults to `tf.int32`.
  """
  # Eager fast path: a concrete (non-graph, non-sparse) tensor's shape is
  # fully known, so the element count can be computed in NumPy without
  # dispatching an op. The hasattr(input, "graph") check excludes symbolic
  # tensors that merely flow through an eager context.
  if (context.executing_eagerly() and not hasattr(input, "graph") and
      not isinstance(
          input,
          (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):
    input = ops.convert_to_tensor(input)
    np_out_type = out_type.as_numpy_dtype
    num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)  # pylint: disable=protected-access
    return ops.convert_to_tensor(num_elements, dtype=out_type)
  with ops.name_scope(name, "Size", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      # Size of a sparse tensor is the product of its dense-shape entries.
      return gen_math_ops.prod(
          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
    else:
      input = ops.convert_to_tensor(input)
      input_shape = input.get_shape()
      if optimize:
        # Fully defined static shape: fold the size into a constant.
        if input_shape.is_fully_defined():
          return constant(input_shape.num_elements(), out_type, name=name)
        # Any statically-known zero dimension forces a size of 0, even if
        # other dimensions are unknown.
        if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
          return constant(0, out_type, name=name)
      return gen_array_ops.size(input, name=name, out_type=out_type)
@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  See also `tf.shape`.

  Returns a 0-D `int32` `Tensor` representing the rank of `input`.

  For example:

  ```python
  # shape of tensor 't' is [2, 2, 3]
  t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
  tf.rank(t)  # 3
  ```

  **Note**: The rank of a tensor is not the same as the rank of a matrix.
  The rank of a tensor is the number of indices required to uniquely select
  each element of the tensor. Rank is also known as "order", "degree", or
  "ndims."

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.

  @compatibility(numpy)
  Equivalent to np.ndim
  @end_compatibility
  """
  # `optimize=True` lets statically-known ranks fold to a constant.
  return rank_internal(input, name=name, optimize=True)
def rank_internal(input, name=None, optimize=True):
  # pylint: disable=redefined-builtin
  """Returns the rank of a tensor.

  Args:
    input: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
    optimize: if true, encode the rank as a constant when possible.

  Returns:
    A `Tensor` of type `int32`.
  """
  with ops.name_scope(name, "Rank", [input]) as name:
    sparse_types = (sparse_tensor.SparseTensor,
                    sparse_tensor.SparseTensorValue)
    if isinstance(input, sparse_types):
      # The rank of a sparse tensor equals the length of its dense shape.
      return gen_array_ops.size(input.dense_shape, name=name)
    input = ops.convert_to_tensor(input)
    ndims = input.get_shape().ndims
    # A statically-known rank folds to a constant instead of a Rank op.
    if optimize and ndims is not None:
      return constant(ndims, dtypes.int32, name=name)
    return gen_array_ops.rank(input, name=name)
# Shared error-message prefix used by _check_index when a slice index has an
# unsupported type.
_SLICE_TYPE_ERROR = (
    "Only integers, slices (`:`), ellipsis (`...`), "
    "tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
    "indices")

# Tensor dtypes accepted as slice indices (plain and ref variants).
_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,
                           dtypes.int64_ref)
def _check_index(idx):
  """Check if a given value is a valid index into a tensor."""
  # Plain Python integers and static Dimensions are always acceptable.
  if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
    return

  # Optimistic check. Assumptions:
  # * any object with a dtype is supported
  # * any object with a dtype has a sizeable shape attribute.
  dtype = getattr(idx, "dtype", None)
  if dtype is None:
    # TODO(slebedev): IndexError seems more appropriate here, but it
    # will break `_slice_helper` contract.
    raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
  if dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES:
    raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
  if idx.shape and len(idx.shape) == 1:
    # Only scalar index tensors are supported.
    raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
def _is_undefined_dimension(d):
  """Returns True iff `d` is a `Dimension` whose value is unknown (None)."""
  if not isinstance(d, tensor_shape.Dimension):
    return False
  return d.value is None
@tf_export("__operators__.getitem", v1=[])
@dispatch.add_dispatch_support
def _slice_helper(tensor, slice_spec, var=None):
  """Overload for Tensor.__getitem__.

  This operation extracts the specified region from the tensor.
  The notation is similar to NumPy with the restriction that
  currently only support basic indexing. That means that
  using a non-scalar tensor as input is not currently allowed.

  Some useful examples:

  ```python
  # Strip leading and trailing 2 elements
  foo = tf.constant([1,2,3,4,5,6])
  print(foo[2:-2].eval())  # => [3,4]

  # Skip every other row and reverse the order of the columns
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[::2,::-1].eval())  # => [[3,2,1], [9,8,7]]

  # Use scalar tensors as indices on both dimensions
  print(foo[tf.constant(0), tf.constant(2)].eval())  # => 3

  # Insert another dimension
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[:, tf.newaxis, :].eval())  # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
  print(foo[:, :, tf.newaxis].eval())  # => [[[1],[2],[3]], [[4],[5],[6]],
  [[7],[8],[9]]]

  # Ellipses (3 equivalent operations)
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[tf.newaxis, :, :].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[tf.newaxis, ...].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]
  print(foo[tf.newaxis].eval())  # => [[[1,2,3], [4,5,6], [7,8,9]]]

  # Masks
  foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
  print(foo[foo > 2].eval())  # => [3, 4, 5, 6, 7, 8, 9]
  ```

  Notes:
    - `tf.newaxis` is `None` as in NumPy.
    - An implicit ellipsis is placed at the end of the `slice_spec`
    - NumPy advanced indexing is currently not supported.

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__getitem__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    tensor: An ops.Tensor object.
    slice_spec: The arguments to Tensor.__getitem__.
    var: In the case of variable slice assignment, the Variable object to
      slice (i.e. tensor is the read-only view of this variable).

  Returns:
    The appropriate slice of "tensor", based on "slice_spec".

  Raises:
    ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice, ellipsis,
      tf.newaxis or scalar int32/int64 tensors.
  """
  tensor = ops.convert_to_tensor(tensor)
  # TODO(wangpeng): Consider supporting var
  if var is None and ops._numpy_style_slicing:  # pylint: disable=protected-access
    return tensor._numpy_style_getitem(slice_spec)  # pylint: disable=protected-access

  # A boolean spec (Python bool, bool Tensor, or bool ndarray) means mask
  # indexing rather than slicing.
  if isinstance(slice_spec, bool) or \
      (isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
      (isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
    return boolean_mask(tensor=tensor, mask=slice_spec)

  if not isinstance(slice_spec, (list, tuple)):
    slice_spec = [slice_spec]

  # Accumulators for the StridedSlice encoding: per-spec begin/end/stride
  # values plus bit masks, where bit i of each mask corresponds to spec i.
  begin, end, strides = [], [], []
  index = 0

  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  for s in slice_spec:
    if isinstance(s, _BaseSlice):
      # A `slice` object: record its start/stop/step, using the mask bits
      # (rather than sentinel values) to mark omitted endpoints.
      if s.start is not None and not _is_undefined_dimension(s.start):
        _check_index(s.start)
        begin.append(s.start)
      else:
        begin.append(0)
        begin_mask |= (1 << index)
      if s.stop is not None and not _is_undefined_dimension(s.stop):
        _check_index(s.stop)
        end.append(s.stop)
      else:
        end.append(0)
        end_mask |= (1 << index)
      if s.step is not None and not _is_undefined_dimension(s.step):
        _check_index(s.step)
        strides.append(s.step)
      else:
        strides.append(1)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      ellipsis_mask |= (1 << index)
    elif s is newaxis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      new_axis_mask |= (1 << index)
    else:
      # A scalar index: take the single element at s and drop the axis.
      _check_index(s)
      begin.append(s)
      end.append(s + 1)
      strides.append(1)
      shrink_axis_mask |= (1 << index)
    index += 1

  # stack possibly involves no tensors, so we must use op_scope correct graph.
  with ops.name_scope(
      None,
      "strided_slice", [tensor] + begin + end + strides,
      skip_on_eager=False) as name:
    if begin:
      packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
                                                  stack(strides))
      # StridedSlice requires begin/end/strides to share a dtype; if any is
      # int64, promote all three.
      if (packed_begin.dtype == dtypes.int64 or
          packed_end.dtype == dtypes.int64 or
          packed_strides.dtype == dtypes.int64):
        if packed_begin.dtype != dtypes.int64:
          packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
        if packed_end.dtype != dtypes.int64:
          packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
        if packed_strides.dtype != dtypes.int64:
          packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
    else:
      # Empty slice_spec (e.g. `t[()]`): all masks are zero and the slice
      # is a no-op.
      var_empty = constant([], dtype=dtypes.int32)
      packed_begin = packed_end = packed_strides = var_empty
    return strided_slice(
        tensor,
        packed_begin,
        packed_end,
        packed_strides,
        begin_mask=begin_mask,
        end_mask=end_mask,
        shrink_axis_mask=shrink_axis_mask,
        new_axis_mask=new_axis_mask,
        ellipsis_mask=ellipsis_mask,
        var=var,
        name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
@dispatch.add_dispatch_support
def slice(input_, begin, size, name=None):
  # pylint: disable=redefined-builtin
  """Extracts a slice from a tensor.

  See also `tf.strided_slice`.

  This operation extracts a slice of size `size` from a tensor `input_`
  starting at the location specified by `begin`. The slice `size` is
  represented as a tensor shape, where `size[i]` is the number of elements of
  the 'i'th dimension of `input_` that you want to slice. The starting
  location (`begin`) for the slice is represented as an offset in each
  dimension of `input_`. In other words, `begin[i]` is the offset into the
  i'th dimension of `input_` that you want to slice from.

  Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
  perform slices, as it allows you to write `foo[3:7, :-2]` instead of
  `tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.

  `begin` is zero-based; `size` is one-based. If `size[i]` is -1,
  all remaining elements in dimension i are included in the
  slice. In other words, this is equivalent to setting:
  `size[i] = input_.dim_size(i) - begin[i]`

  This operation requires that:
  `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`

  For example:

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                   [[3, 3, 3], [4, 4, 4]],
                   [[5, 5, 5], [6, 6, 6]]])
  tf.slice(t, [1, 0, 0], [1, 1, 3])  # [[[3, 3, 3]]]
  tf.slice(t, [1, 0, 0], [1, 2, 3])  # [[[3, 3, 3],
                                     #   [4, 4, 4]]]
  tf.slice(t, [1, 0, 0], [2, 1, 3])  # [[[3, 3, 3]],
                                     #  [[5, 5, 5]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    size: An `int32` or `int64` `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input_`.
  """
  # Thin wrapper around the generated Slice kernel.
  sliced = gen_array_ops._slice(input_, begin, size, name=name)
  return sliced
# pylint: disable=invalid-name
@tf_export("strided_slice")
@dispatch.add_dispatch_support
def strided_slice(input_,
                  begin,
                  end,
                  strides=None,
                  begin_mask=0,
                  end_mask=0,
                  ellipsis_mask=0,
                  new_axis_mask=0,
                  shrink_axis_mask=0,
                  var=None,
                  name=None):
  """Extracts a strided slice of a tensor (generalized Python array indexing).

  See also `tf.slice`.

  **Instead of calling this op directly most users will want to use the
  NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
  is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
  The interface of this op is a low-level encoding of the slicing syntax.

  Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
  from the given `input_` tensor. Starting at the location specified by
  `begin` the slice continues by adding `stride` to the index until all
  dimensions are not less than `end`.
  Note that a stride can be negative, which causes a reverse slice.

  Given a Python slice `input[spec0, spec1, ..., specn]`,
  this function will be called as follows.

  `begin`, `end`, and `strides` will be vectors of length n.
  n in general is not equal to the rank of the `input_` tensor.

  In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
  `new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
  the ith spec.

  If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
  the fullest possible range in that dimension is used instead.
  `end_mask` works analogously, except with the end range.

  `foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
  `foo[::-1]` reverses a tensor with shape 8.

  If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
  as needed will be inserted between other dimensions. Only one
  non-zero bit is allowed in `ellipsis_mask`.

  For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
  equivalent to `foo[3:5,:,:,4:5]` and
  `foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.

  If the ith bit of `new_axis_mask` is set, then `begin`,
  `end`, and `stride` are ignored and a new length 1 dimension is
  added at this point in the output tensor.

  For example,
  `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.

  If the ith bit of `shrink_axis_mask` is set, it implies that the ith
  specification shrinks the dimensionality by 1, taking on the value at index
  `begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example
  in Python one might do `foo[:, 3, :]` which would result in
  `shrink_axis_mask` equal to 2.

  NOTE: `begin` and `end` are zero-indexed.
  `strides` entries must be non-zero.

  ```python
  t = tf.constant([[[1, 1, 1], [2, 2, 2]],
                   [[3, 3, 3], [4, 4, 4]],
                   [[5, 5, 5], [6, 6, 6]]])
  tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1])  # [[[3, 3, 3]]]
  tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1])  # [[[3, 3, 3],
                                                        #   [4, 4, 4]]]
  tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1])  # [[[4, 4, 4],
                                                           #   [3, 3, 3]]]
  ```

  Args:
    input_: A `Tensor`.
    begin: An `int32` or `int64` `Tensor`.
    end: An `int32` or `int64` `Tensor`.
    strides: An `int32` or `int64` `Tensor`.
    begin_mask: An `int32` mask.
    end_mask: An `int32` mask.
    ellipsis_mask: An `int32` mask.
    new_axis_mask: An `int32` mask.
    shrink_axis_mask: An `int32` mask.
    var: The variable corresponding to `input_` or None
    name: A name for the operation (optional).

  Returns:
    A `Tensor` the same type as `input`.
  """
  # Default stride of 1 in every sliced dimension.
  if strides is None:
    strides = ones_like(begin)

  op = gen_array_ops.strided_slice(
      input=input_,
      begin=begin,
      end=end,
      strides=strides,
      name=name,
      begin_mask=begin_mask,
      end_mask=end_mask,
      ellipsis_mask=ellipsis_mask,
      new_axis_mask=new_axis_mask,
      shrink_axis_mask=shrink_axis_mask)

  parent_name = name

  if var is not None:

    def assign(val, name=None):
      """Closure that holds all the arguments to create an assignment."""

      if name is None:
        name = parent_name + "_assign"

      # Re-use the exact slice arguments captured above so that
      # `sliced.assign(val)` writes back to the same region of `var`.
      return var._strided_slice_assign(
          begin=begin,
          end=end,
          strides=strides,
          value=val,
          name=name,
          begin_mask=begin_mask,
          end_mask=end_mask,
          ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask,
          shrink_axis_mask=shrink_axis_mask)

    # Attach the assignment closure to the result so variable slices
    # support the `A[...].assign(value)` pattern.
    op.assign = assign

  return op
def _SliceHelperVar(var, slice_spec):
  """Creates a slice helper object given a variable.

  This allows creating a sub-tensor from part of the current contents
  of a variable. See `tf.Tensor.__getitem__` for detailed examples
  of slicing.

  This function in addition also allows assignment to a sliced range.
  This is similar to `__setitem__` functionality in Python. However,
  the syntax is different so that the user can capture the assignment
  operation for grouping or passing to `sess.run()`.
  For example,

  ```python
  import tensorflow as tf
  A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    print(sess.run(A[:2, :2]))  # => [[1,2], [4,5]]

    op = A[:2,:2].assign(22. * tf.ones((2, 2)))
    print(sess.run(op))  # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
  ```

  Note that assignments currently do not support NumPy broadcasting
  semantics.

  Args:
    var: An `ops.Variable` object.
    slice_spec: The arguments to `Tensor.__getitem__`.

  Returns:
    The appropriate slice of "tensor", based on "slice_spec".
    As an operator. The operator also has a `assign()` method
    that can be used to generate an assignment operator.

  Raises:
    ValueError: If a slice range is negative size.
    TypeError: TypeError: If the slice indices aren't int, slice,
      ellipsis, tf.newaxis or int32/int64 tensors.
  """
  # Slice a read-only snapshot of the variable; passing `var` along lets
  # _slice_helper attach the `.assign()` method for write-back.
  snapshot = var.value()
  return _slice_helper(snapshot, slice_spec, var)
# Install _slice_helper as Tensor.__getitem__ so NumPy-style slicing syntax
# (`t[1:, ..., tf.newaxis]`) dispatches through it.
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
@dispatch.add_dispatch_support
def parallel_stack(values, name="parallel_stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.

  Requires that the shape of inputs be known at graph construction time.

  Packs the list of tensors in `values` into a tensor with rank one higher
  than each tensor in `values`, by packing them along the first dimension.
  Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
  tensor will have the shape `(N, A, B, C)`.

  For example:

  ```python
  x = tf.constant([1, 4])
  y = tf.constant([2, 5])
  z = tf.constant([3, 6])
  tf.parallel_stack([x, y, z])  # [[1, 4], [2, 5], [3, 6]]
  ```

  The difference between `stack` and `parallel_stack` is that `stack` requires
  all the inputs be computed before the operation will begin but doesn't
  require that the input shapes be known during graph construction.
  `parallel_stack` will copy pieces of the input into the output as they
  become available, in some situations this can provide a performance benefit.

  Unlike `stack`, `parallel_stack` does NOT support backpropagation.

  This is the opposite of unstack. The numpy equivalent is

      tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])

  @compatibility(eager)
  parallel_stack is not compatible with eager execution.
  @end_compatibility

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.

  Raises:
    RuntimeError: if executed in eager mode.
  """
  if context.executing_eagerly():
    raise RuntimeError("tf.parallel_stack() is not compatible with "
                       "eager execution.")
  with ops.name_scope(name):
    value_t = ops.convert_to_tensor(values[0])
    # value_t is already a Tensor (converted above); the original code
    # redundantly ran it through convert_to_tensor a second time.
    value_shape = value_t.get_shape()

    # Output shape is (N,) + shape of each element.
    output_shape = tensor_shape.TensorShape([len(values)])
    output_shape = output_shape.concatenate(value_shape)
    # expand_dims converts concat to stack.
    return gen_array_ops.parallel_concat(
        [expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.

  See also `tf.concat`, `tf.tile`, `tf.repeat`.

  Packs the list of tensors in `values` into a tensor with rank one higher
  than each tensor in `values`, by packing them along the `axis` dimension.

  Given a list of length `N` of tensors of shape `(A, B, C)`;
  if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
  if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
  Etc.

  For example:

  >>> x = tf.constant([1, 4])
  >>> y = tf.constant([2, 5])
  >>> z = tf.constant([3, 6])
  >>> tf.stack([x, y, z])
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[1, 4],
         [2, 5],
         [3, 6]], dtype=int32)>
  >>> tf.stack([x, y, z], axis=1)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 2, 3],
         [4, 5, 6]], dtype=int32)>

  This is the opposite of unstack. The numpy equivalent is `np.stack`

  >>> np.array_equal(np.stack([x, y, z]), tf.stack([x, y, z]))
  True

  Args:
    values: A list of `Tensor` objects with the same shape and type.
    axis: An `int`. The axis to stack along. Defaults to the first dimension.
      Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
    name: A name for this operation (optional).

  Returns:
    output: A stacked `Tensor` with the same type as `values`.

  Raises:
    ValueError: If `axis` is out of the range [-(R+1), R+1).
  """
  if axis == 0:
    # If the input is a constant list, it can be converted to a constant op
    try:
      return ops.convert_to_tensor(values, name=name)
    except (TypeError, ValueError):
      # Input list contains non-constant tensors; fall through to Pack.
      pass

  value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple()  # pylint: disable=protected-access
  if value_shape is not None:
    # Validate `axis` against the rank of the stacked result.
    expanded_num_dims = len(value_shape) + 1
    if not -expanded_num_dims <= axis < expanded_num_dims:
      raise ValueError("axis = %d not in [%d, %d)" %
                       (axis, -expanded_num_dims, expanded_num_dims))

  return gen_array_ops.pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
  """Converts the given list or tuple to a tensor by packing.

  Args:
    list_or_tuple: A (possibly nested) list or tuple containing a tensor.
    dtype: The element type of the returned tensor.
    name: A name for the returned tensor.

  Returns:
    A `tf.Tensor` with value equivalent to `list_or_tuple`.
  """
  if context.executing_eagerly():
    # NOTE: Fast path when all the items are tensors, this doesn't do any type
    # checking.
    if all(isinstance(elem, core.Tensor) for elem in list_or_tuple):
      return gen_array_ops.pack(list_or_tuple, name=name)
  must_pack = False
  converted_elems = []
  with ops.name_scope(name) as scope:
    # First pass: recursively convert nested lists/tuples; record whether any
    # element is already a Tensor (which forces packing).
    for i, elem in enumerate(list_or_tuple):
      if isinstance(elem, core.Tensor):
        if dtype is not None and elem.dtype.base_dtype != dtype:
          raise TypeError("Cannot convert a list containing a tensor of dtype "
                          "%s to %s (Tensor is: %r)" %
                          (elem.dtype, dtype, elem))
        converted_elems.append(elem)
        must_pack = True
      elif isinstance(elem, (list, tuple)):
        # Recurse; the element's index doubles as its name scope.
        converted_elem = _autopacking_helper(elem, dtype, str(i))
        if isinstance(converted_elem, core.Tensor):
          must_pack = True
        converted_elems.append(converted_elem)
      else:
        converted_elems.append(elem)
    if must_pack:
      # Second pass: promote any remaining non-Tensor elements to constants
      # so everything can be packed with a single Pack op.
      elems_as_tensors = []
      for i, elem in enumerate(converted_elems):
        if isinstance(elem, core.Tensor):
          elems_as_tensors.append(elem)
        else:
          # NOTE(mrry): This is inefficient, but it enables us to
          # handle the case where the list arguments are other
          # convertible-to-tensor types, such as numpy arrays.
          elems_as_tensors.append(
              constant_op.constant(elem, dtype=dtype, name=str(i)))
      return gen_array_ops.pack(elems_as_tensors, name=scope)
    else:
      # No tensors anywhere in the structure; return it unchanged so other
      # conversion functions can handle it.
      return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
  """Returns the dtype of any tensor-like object in `list_or_tuple`, if found.

  Args:
    list_or_tuple: A list or tuple representing an object that can be
      converted to a `tf.Tensor`.

  Returns:
    The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
    such object exists.
  """
  # Depth-first search for the first Tensor leaf; its base dtype wins.
  for item in list_or_tuple:
    if isinstance(item, core.Tensor):
      return item.dtype.base_dtype
    if isinstance(item, (list, tuple)):
      nested_dtype = _get_dtype_from_nested_lists(item)
      if nested_dtype is not None:
        return nested_dtype
  return None
def _cast_nested_seqs_to_dtype(dtype):
  """Returns a map function that casts Tensors to `dtype`, leaving other
  elements untouched."""

  def _maybe_cast(elem):
    if not isinstance(elem, core.Tensor):
      return elem
    if elem.dtype.base_dtype == dtype:
      return elem
    return gen_math_ops.cast(elem, dtype)

  return _maybe_cast
# Plain Python/NumPy scalar types and ndarrays that can never trigger
# autopacking; used by _should_not_autopack as a cheap type-based filter.
_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)
_NON_AUTOPACKABLE_TYPES.add(np.ndarray)
def _should_not_autopack(v):
  """Fast check that no leaf of `v` could be a Tensor (so skip autopacking)."""
  # The condition we really want is
  #   any(isinstance(elem, core.Tensor))
  # but it is >5x slower due to abc.ABCMeta.__instancecheck__.
  # pylint: disable=unidiomatic-typecheck
  # TODO(slebedev): add nest.all?
  return all(type(elem) in _NON_AUTOPACKABLE_TYPES for elem in nest.flatten(v))
  # pylint: enable=unidiomatic-typecheck
# pylint: enable=unidiomatic-typecheck
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
  """Tensor conversion function that automatically packs arguments.

  Registered for lists/tuples so nested structures containing tensors are
  stacked into a single `Tensor`. Defers (returns `NotImplemented`) for ref
  conversions and for structures with no tensor-like leaves.
  """
  if as_ref:
    return NotImplemented
  if _should_not_autopack(v):
    return NotImplemented
  found_dtype = _get_dtype_from_nested_lists(v)
  if found_dtype is None:
    # We did not find any tensor-like objects in the nested lists, so defer to
    # other conversion functions.
    return NotImplemented
  if dtype is not None and dtype != found_dtype:
    # A target dtype was requested that differs from the tensors found:
    # cast every tensor leaf before packing.
    v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
  target_dtype = found_dtype if dtype is None else dtype
  return _autopacking_helper(v, target_dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value. The priority of 99 controls its ordering
# relative to other registered converters (see
# `ops.register_tensor_conversion_function` for the priority semantics).
ops.register_tensor_conversion_function((list, tuple),
                                        _autopacking_conversion_function, 99)
@tf_export("unstack")
@dispatch.add_dispatch_support
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks tensors from `value` by chipping it along the `axis` dimension.
>>> x = tf.reshape(tf.range(12), (3,4))
>>>
>>> p, q, r = tf.unstack(x)
>>> p.shape.as_list()
[4]
>>> i, j, k, l = tf.unstack(x, axis=1)
>>> i.shape.as_list()
[3]
This is the opposite of stack.
>>> x = tf.stack([i, j, k, l], axis=1)
More generally if you have a tensor of shape `(A, B, C, D)`:
>>> A, B, C, D = [2, 3, 4, 5]
>>> t = tf.random.normal(shape=[A, B, C, D])
The number of tensor returned is equal to the length of the target `axis`:
>>> axis = 2
>>> items = tf.unstack(t, axis=axis)
>>> len(items) == t.shape[axis]
True
The shape of each result tensor is equal to the shape of the input tensor,
with the target `axis` removed.
>>> items[0].shape.as_list() # [A, B, D]
[2, 3, 5]
The value of each tensor `items[i]` is equal to the slice of `input` across
`axis` at index `i`:
>>> for i in range(len(items)):
... slice = t[:,:,i,:]
... assert tf.reduce_all(slice == items[i])
#### Python iterable unpacking
With eager execution you _can_ unstack the 0th axis of a tensor using python's
iterable unpacking:
>>> t = tf.constant([1,2,3])
>>> a,b,c = t
`unstack` is still necessary because Iterable unpacking doesn't work in
a `@tf.function`: Symbolic tensors are not iterable.
You need to use `tf.unstack` here:
>>> @tf.function
... def bad(t):
... a,b,c = t
... return a
>>>
>>> bad(t)
Traceback (most recent call last):
...
OperatorNotAllowedInGraphError: ...
>>> @tf.function
... def good(t):
... a,b,c = tf.unstack(t)
... return a
>>>
>>> good(t).numpy()
1
#### Unknown shapes
Eager tensors have concrete values, so their shape is always known.
Inside a `tf.function` the symbolic tensors may have unknown shapes.
If the length of `axis` is unknown `tf.unstack` will fail because it cannot
handle an unknown number of tensors:
>>> @tf.function(input_signature=[tf.TensorSpec([None], tf.float32)])
... def bad(t):
... tensors = tf.unstack(t)
... return tensors[0]
>>>
>>> bad(tf.constant([1,2,3]))
Traceback (most recent call last):
...
ValueError: Cannot infer num from shape (None,)
If you know the `axis` length you can pass it as the `num` argument. But this
must be a constant value.
If you actually need a variable number of tensors in a single `tf.function`
trace, you will need to use exlicit loops and a `tf.TensorArray` instead.
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred if
`None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `axis` is out of the range `[-R, R)`.
ValueError: If `num` is unspecified and cannot be inferred.
InvalidArgumentError: If `num` does not match the shape of `value`.
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape.dims[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
See also `tf.tile`, `tf.stack`, `tf.repeat`.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
>>> t1 = [[1, 2, 3], [4, 5, 6]]
>>> t2 = [[7, 8, 9], [10, 11, 12]]
>>> tf.concat([t1, t2], 0)
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]], dtype=int32)>
>>> tf.concat([t1, t2], 1)
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[ 1, 2, 3, 7, 8, 9],
[ 4, 5, 6, 10, 11, 12]], dtype=int32)>
As in Python, the `axis` could also be negative numbers. Negative `axis`
are interpreted as counting from the end of the rank, i.e.,
`axis + rank(values)`-th dimension.
For example:
>>> t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
>>> t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
>>> tf.concat([t1, t2], -1)
<tf.Tensor: shape=(2, 2, 4), dtype=int32, numpy=
array([[[ 1, 2, 7, 4],
[ 2, 3, 8, 4]],
[[ 4, 4, 2, 10],
[ 5, 3, 15, 11]]], dtype=int32)>
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`. As in Python, indexing for
axis is 0-based. Positive axis in the rage of `[0, rank(values))` refers
to `axis`-th dimension. And negative axis refers to `axis +
rank(values)`-th dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_has_rank(0)
return identity(values[0], name=name)
return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
@tf_export(v1=["boolean_mask"])
@dispatch.add_dispatch_support
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Examples:
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
tf.boolean_mask(tensor, mask) # [0, 2]
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
tf.boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
Args:
tensor: N-D Tensor.
mask: K-D boolean Tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
"""
def _apply_mask_1d(reshaped_tensor, mask, axis=None):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where_v2(mask), axis=[1])
return gather(reshaped_tensor, indices, axis=axis)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
axis = 0 if axis is None else axis
axis_value = tensor_util.constant_value(axis)
if axis_value is not None:
axis = axis_value
shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
tensor = reshape(
tensor,
concat([
shape(tensor)[:axis], [leading_size],
shape(tensor)[axis + ndims_mask:]
], 0))
# TODO(yongtang): tf.reshape in C++ kernel might have set the shape
# correctly, so the following may not be needed? It still might be possible
# that there are some edge case where tensor_util.constant_value resolves
# more cases than ShapeInference of tf.reshape in C++ kernel.
if axis_value is not None:
first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape(shape_tensor[:axis]).concatenate(
[first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask, axis)
@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
See also: `tf.ragged.boolean_mask`, which can be applied to both dense and
ragged tensors, and can be used if you need to preserve the masked dimensions
of `tensor` (rather than flattening them, as `tf.boolean_mask` does).
Examples:
>>> tensor = [0, 1, 2, 3] # 1-D example
>>> mask = np.array([True, False, True, False])
>>> tf.boolean_mask(tensor, mask)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([0, 2], dtype=int32)>
>>> tensor = [[1, 2], [3, 4], [5, 6]] # 2-D example
>>> mask = np.array([True, False, True])
>>> tf.boolean_mask(tensor, mask)
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[1, 2],
[5, 6]], dtype=int32)>
Args:
tensor: N-D Tensor.
mask: K-D boolean Tensor, K <= N and K must be known statically.
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
return boolean_mask(tensor, mask, name, axis)
@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse.mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
@dispatch.add_dispatch_support
def unique(x, out_idx=dtypes.int32, name=None):
"""Finds unique elements in a 1-D tensor.
See also `tf.unique_with_counts`.
This operation returns a tensor `y` containing all of the unique elements
of `x` sorted in the same order that they occur in `x`. This operation
also returns a tensor `idx` the same size as `x` that contains the index
of each value of `x` in the unique output `y`. In other words:
y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]
Example usage:
>>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
>>> y, idx = unique(x)
>>> y
<tf.Tensor: id=5, shape=(5,), dtype=int32,
numpy=array([1, 2, 4, 7, 8], dtype=int32)>
>>> idx
<tf.Tensor: id=6, shape=(9,), dtype=int32,
numpy=array([0, 0, 1, 2, 2, 2, 3, 4, 4], dtype=int32)>
Args:
x: A Tensor. 1-D.
out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to
tf.int32.
name: A name for the operation (optional).
Returns:
A tuple of Tensor objects (y, idx).
y: A Tensor. Has the same type as x.
idx: A Tensor of type out_idx.
"""
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) pass.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique(x, out_idx, name)
unique.__doc__ = gen_array_ops.unique.__doc__
@tf_export("unique_with_counts")
@dispatch.add_dispatch_support
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
"""Finds unique elements in a 1-D tensor.
See also `tf.unique`.
This operation returns a tensor `y` containing all of the unique elements
of `x` sorted in the same order that they occur in `x`. This operation
also returns a tensor `idx` the same size as `x` that contains the index
of each value of `x` in the unique output `y`. Finally, it returns a
third tensor `count` that contains the count of each element of `y`
in `x`. In other words:
y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]
Example usage:
>>> x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
>>> y, idx, count = unique_with_counts(x)
>>> y
<tf.Tensor: id=8, shape=(5,), dtype=int32,
numpy=array([1, 2, 4, 7, 8], dtype=int32)>
>>> idx
<tf.Tensor: id=9, shape=(9,), dtype=int32,
numpy=array([0, 0, 1, 2, 2, 2, 3, 4, 4], dtype=int32)>
>>> count
<tf.Tensor: id=10, shape=(5,), dtype=int32,
numpy=array([2, 1, 3, 1, 2], dtype=int32)>
Args:
x: A Tensor. 1-D.
out_idx: An optional tf.DType from: tf.int32, tf.int64. Defaults to
tf.int32.
name: A name for the operation (optional).
Returns:
A tuple of Tensor objects (y, idx, count).
y: A Tensor. Has the same type as x.
idx: A Tensor of type out_idx.
count: A Tensor of type out_idx.
"""
# TODO(yongtang): switch to v2 once API deprecation
# period (3 weeks) pass.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
@tf_export("split")
@dispatch.add_dispatch_support
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor `value` into a list of sub tensors.
See also `tf.unstack`.
If `num_or_size_splits` is an integer, then `value` is split along the
dimension `axis` into `num_or_size_splits` smaller tensors. This requires that
`value.shape[axis]` is divisible by `num_or_size_splits`.
If `num_or_size_splits` is a 1-D Tensor (or list), then `value` is split into
`len(num_or_size_splits)` elements. The shape of the `i`-th
element has the same size as the `value` except along dimension `axis` where
the size is `num_or_size_splits[i]`.
For example:
>>> x = tf.Variable(tf.random.uniform([5, 30], -1, 1))
>>>
>>> # Split `x` into 3 tensors along dimension 1
>>> s0, s1, s2 = tf.split(x, num_or_size_splits=3, axis=1)
>>> tf.shape(s0).numpy()
array([ 5, 10], dtype=int32)
>>>
>>> # Split `x` into 3 tensors with sizes [4, 15, 11] along dimension 1
>>> split0, split1, split2 = tf.split(x, [4, 15, 11], 1)
>>> tf.shape(split0).numpy()
array([5, 4], dtype=int32)
>>> tf.shape(split1).numpy()
array([ 5, 15], dtype=int32)
>>> tf.shape(split2).numpy()
array([ 5, 11], dtype=int32)
Args:
value: The `Tensor` to split.
num_or_size_splits: Either an integer indicating the number of splits along
`axis` or a 1-D integer `Tensor` or Python list containing the sizes of
each output tensor along `axis`. If a scalar, then it must evenly divide
`value.shape[axis]`; otherwise the sum of sizes along the split axis
must match that of the `value`.
axis: An integer or scalar `int32` `Tensor`. The dimension along which to
split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns a list of `num_or_size_splits`
`Tensor` objects; if `num_or_size_splits` is a 1-D Tensor returns
`num_or_size_splits.get_shape[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
if isinstance(num_or_size_splits,
(numbers.Integral, tensor_shape.Dimension)):
return gen_array_ops.split(
axis=axis, num_split=num_or_size_splits, value=value, name=name)
size_splits = ops.convert_to_tensor(num_or_size_splits)
if size_splits._rank() == 0:
raise ValueError(
"Rank-0 tensors are not supported as the num_or_size_splits argument "
"to split. Argument provided: %s" % (num_or_size_splits,))
if num is None:
size_splits_shape = size_splits._shape_tuple()
if size_splits_shape:
num = size_splits_shape[0]
if num is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops.split_v(
value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
@tf_export("transpose", v1=[])
@dispatch.add_dispatch_support
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
"""Transposes `a`, where `a` is a Tensor.
Permutes the dimensions according to the value of `perm`.
The returned tensor's dimension `i` will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is the rank
of the input tensor. Hence by default, this operation performs a regular
matrix transpose on 2-D input Tensors.
If conjugate is `True` and `a.dtype` is either `complex64` or `complex128`
then the values of `a` are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
>>> x = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> tf.transpose(x)
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)>
Equivalently, you could call `tf.transpose(x, perm=[1, 0])`.
If `x` is complex, setting conjugate=True gives the conjugate transpose:
>>> x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
... [4 + 4j, 5 + 5j, 6 + 6j]])
>>> tf.transpose(x, conjugate=True)
<tf.Tensor: shape=(3, 2), dtype=complex128, numpy=
array([[1.-1.j, 4.-4.j],
[2.-2.j, 5.-5.j],
[3.-3.j, 6.-6.j]])>
'perm' is more useful for n-dimensional tensors where n > 2:
>>> x = tf.constant([[[ 1, 2, 3],
... [ 4, 5, 6]],
... [[ 7, 8, 9],
... [10, 11, 12]]])
As above, simply calling `tf.transpose` will default to `perm=[2,1,0]`.
To take the transpose of the matrices in dimension-0 (such as when you are
transposing matrices where 0 is the batch dimension), you would set
`perm=[0,2,1]`.
>>> tf.transpose(x, perm=[0, 2, 1])
<tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
array([[[ 1, 4],
[ 2, 5],
[ 3, 6]],
[[ 7, 10],
[ 8, 11],
[ 9, 12]]], dtype=int32)>
Note: This has a shorthand `linalg.matrix_transpose`):
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`. This should be a vector.
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
return transpose(a=a, perm=perm, name=name, conjugate=conjugate)
@tf_export(v1=["transpose"])
@dispatch.add_dispatch_support
def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`.
Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.matrix_transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.math.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if not tensor_util.is_tf_type(a):
a = ops.convert_to_tensor(a, name="a")
if conjugate and a.dtype.is_complex:
transpose_fn = gen_array_ops.conjugate_transpose
else:
transpose_fn = gen_array_ops.transpose
if perm is not None:
return transpose_fn(a, perm, name=name)
rank = a.shape.rank
if rank is None:
perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)
else:
perm = np.arange(rank - 1, -1, -1, dtype=np.int32)
return transpose_fn(a, perm, name=name)
# pylint: disable=invalid-name
@tf_export(
    "linalg.matrix_transpose",
    v1=["linalg.transpose", "linalg.matrix_transpose", "matrix_transpose"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
  """Transposes last two dimensions of tensor `a`.

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [4, 5, 6]])
  tf.linalg.matrix_transpose(x)  # [[1, 4],
                                 #  [2, 5],
                                 #  [3, 6]]

  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.linalg.matrix_transpose(x, conjugate=True)  # [[1 - 1j, 4 - 4j],
                                                 #  [2 - 2j, 5 - 5j],
                                                 #  [3 - 3j, 6 - 6j]]

  # Matrix with two batch dimensions: if x.shape is [1, 2, 3, 4],
  # tf.linalg.matrix_transpose(x) has shape [1, 2, 4, 3].
  ```

  Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
  This is done with minimal cost, and is preferable to using this function. E.g.

  ```python
  # Good! Transpose is taken at minimal additional cost.
  tf.matmul(matrix, b, transpose_b=True)

  # Inefficient!
  tf.matmul(matrix, tf.linalg.matrix_transpose(b))
  ```

  @compatibility(numpy)
  In `numpy` transposes are memory-efficient constant time operations as they
  simply return a new view of the same data with adjusted `strides`.

  TensorFlow does not support strides, `linalg.matrix_transpose` returns a new
  tensor with the items permuted.
  @end_compatibility

  Args:
    a: A `Tensor` with `rank >= 2`.
    name: A name for the operation (optional).
    conjugate: Optional bool. Setting it to `True` is mathematically equivalent
      to tf.math.conj(tf.linalg.matrix_transpose(input)).

  Returns:
    A transposed batch matrix `Tensor`.

  Raises:
    ValueError: If `a` is determined statically to have `rank < 2`.
  """
  with ops.name_scope(name, values=[a]):
    a = ops.convert_to_tensor(a, name="a")
    a_shape = a.get_shape()
    ndims = a_shape.ndims
    if ndims is None:
      # Rank is not known statically, so compute the permutation with ops.
      a_rank = rank(a)
      perm = concat(
          (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)
    else:
      # With a static rank we can (1) reject non-matrices up front and
      # (2) use a Python-list permutation, which preserves static shape
      # information and avoids extra computation.
      if ndims < 2:
        raise ValueError(
            "Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
            "%s" % a_shape)
      perm = list(range(ndims - 2)) + [ndims - 1, ndims - 2]
    return transpose(a, perm=perm, conjugate=conjugate)
@tf_export("linalg.diag", v1=["linalg.diag", "matrix_diag"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("matrix_diag")
def matrix_diag(diagonal,
name="diag",
k=0,
num_rows=-1,
num_cols=-1,
padding_value=0,
align="RIGHT_LEFT"):
"""Returns a batched diagonal tensor with given batched diagonal values.
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
diagonals of a matrix, with everything else padded with `padding`. `num_rows`
and `num_cols` specify the dimension of the innermost matrix of the output. If
both are not specified, the op assumes the innermost matrix is square and
infers its size from `k` and the innermost dimension of `diagonal`. If only
one of them is specified, the op assumes the unspecified value is the smallest
possible based on other criteria.
Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor
has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only
one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has
rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`.
The second innermost dimension of `diagonal` has double meaning. When `k` is
scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and
the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
padding_value ; otherwise
```
Otherwise, `M` is treated as the number of diagonals for the matrix in the
same batch (`M = k[1]-k[0]+1`), and the output tensor is:
```
output[i, j, ..., l, m, n]
= diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
padding_value ; otherwise
```
where `d = n - m`, `diag_index = k[1] - d`, and
`index_in_diag = n - max(d, 0) + offset`.
`offset` is zero except when the alignment of the diagonal is to the right.
```
offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
and `d >= 0`) or
(`align` in {LEFT_RIGHT, RIGHT_RIGHT}
and `d <= 0`)
0 ; otherwise
```
where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
For example:
```
# The main diagonal.
diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4)
[5, 6, 7, 8]])
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4)
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]],
[[5, 0, 0, 0],
[0, 6, 0, 0],
[0, 0, 7, 0],
[0, 0, 0, 8]]]
# A superdiagonal (per batch).
diagonal = np.array([[1, 2, 3], # Input shape: (2, 3)
[4, 5, 6]])
tf.matrix_diag(diagonal, k = 1)
==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4)
[0, 0, 2, 0],
[0, 0, 0, 3],
[0, 0, 0, 0]],
[[0, 4, 0, 0],
[0, 0, 5, 0],
[0, 0, 0, 6],
[0, 0, 0, 0]]]
# A tridiagonal band (per batch).
diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3)
[1, 2, 3],
[0, 4, 5]],
[[2, 3, 0],
[6, 7, 9],
[0, 9, 1]]])
tf.matrix_diag(diagonals, k = (-1, 1))
==> [[[1, 8, 0], # Output shape: (2, 3, 3)
[4, 2, 9],
[0, 5, 3]],
[[6, 2, 0],
[9, 7, 3],
[0, 1, 9]]]
# RIGHT_LEFT alignment.
diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3)
[1, 2, 3],
[4, 5, 0]],
[[0, 2, 3],
[6, 7, 9],
[9, 1, 0]]])
tf.matrix_diag(diagonals, k = (-1, 1), align="RIGHT_LEFT")
==> [[[1, 8, 0], # Output shape: (2, 3, 3)
[4, 2, 9],
[0, 5, 3]],
[[6, 2, 0],
[9, 7, 3],
[0, 1, 9]]]
# Rectangular matrix.
diagonal = np.array([1, 2]) # Input shape: (2)
tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
==> [[0, 0, 0, 0], # Output shape: (3, 4)
[1, 0, 0, 0],
[0, 2, 0, 0]]
# Rectangular matrix with inferred num_cols and padding_value = 9.
tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
==> [[9, 9], # Output shape: (3, 2)
[1, 9],
[9, 2]]
```
Args:
diagonal: A `Tensor` with `rank k >= 1`.
name: A name for the operation (optional).
k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
main diagonal, and negative value means subdiagonals. `k` can be a single
integer (for a single diagonal) or a pair of integers specifying the low
and high ends of a matrix band. `k[0]` must not be larger than `k[1]`.
num_rows: The number of rows of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
num_cols: The number of columns of the output matrix. If it is not provided,
the op assumes the output matrix is a square matrix and infers the matrix
size from `d_lower`, `d_upper`, and the innermost dimension of `diagonal`.
padding_value: The value to fill the area outside the specified diagonal
band with. Default is 0.
align: Some diagonals are shorter than `max_diag_len` and need to be padded.
`align` is a string specifying how superdiagonals and subdiagonals should
be aligned, respectively. There are four possible alignments: "RIGHT_LEFT"
(default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT"
aligns superdiagonals to the right (left-pads the row) and subdiagonals to
the left (right-pads the row). It is the packing format LAPACK uses.
cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment.
Returns:
A Tensor. Has the same type as `diagonal`.
"""
# Special case to sidestep the tf.constant conversion error:
# TypeError: Expected bool, got 0 of type 'int' instead.
if hasattr(diagonal, "dtype") and diagonal.dtype == "bool":
padding_value = bool(padding_value)
return gen_array_ops.matrix_diag_v3(
diagonal=diagonal,
k=k,
num_rows=num_rows,
num_cols=num_cols,
padding_value=padding_value,
align=align,
name=name)
@tf_export("linalg.diag_part", v1=["linalg.diag_part", "matrix_diag_part"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("matrix_diag_part")
def matrix_diag_part(
    input,  # pylint:disable=redefined-builtin
    name="diag_part",
    k=0,
    padding_value=0,
    align="RIGHT_LEFT"):
  """Returns the batched diagonal part of a batched tensor.

  Extracts the `k[0]`-th to `k[1]`-th diagonals of the innermost matrices of
  `input`, which has `r` dimensions `[I, J, ..., L, M, N]` with `r >= 2`.

  When a single diagonal is requested (`k` is a scalar or `k[0] == k[1]`),
  the output has rank `r - 1` and shape `[I, J, ..., L, max_diag_len]`.
  Otherwise it has rank `r` and shape
  `[I, J, ..., L, num_diags, max_diag_len]`, where
  `num_diags = k[1] - k[0] + 1` and
  `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.

  Entries that would fall outside the input matrix are filled with
  `padding_value`; diagonals shorter than `max_diag_len` are positioned
  within their row according to `align`.

  For example:

  ```
  input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
                     [5, 6, 7, 8],
                     [9, 8, 7, 6]],
                    [[5, 4, 3, 2],
                     [1, 2, 3, 4],
                     [5, 6, 7, 8]]])

  # A main diagonal from each batch.
  tf.linalg.diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
                                  [5, 2, 7]]

  # A superdiagonal from each batch.
  tf.linalg.diag_part(input, k = 1)
    ==> [[2, 7, 6],  # Output shape: (2, 3)
         [4, 3, 8]]

  # padding_value = 9
  tf.linalg.diag_part(input, k = (1, 3), padding_value = 9)
    ==> [[[4, 9, 9],  # Output shape: (2, 3, 3)
          [3, 8, 9],
          [2, 7, 6]],
         [[2, 9, 9],
          [3, 4, 9],
          [4, 3, 8]]]
  ```

  Args:
    input: A `Tensor` with `rank k >= 2`.
    name: A name for the operation (optional).
    k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
      main diagonal, and negative value means subdiagonals. `k` can be a
      single integer (for a single diagonal) or a pair of integers specifying
      the low and high ends of a matrix band. `k[0]` must not be larger than
      `k[1]`.
    padding_value: The value to fill the area outside the specified diagonal
      band with. Default is 0.
    align: One of "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", or
      "RIGHT_RIGHT", specifying how superdiagonals and subdiagonals (in that
      order) are aligned when shorter than `max_diag_len`. "RIGHT_LEFT" (the
      packing format LAPACK uses) aligns superdiagonals to the right
      (left-pads the row) and subdiagonals to the left (right-pads the row);
      cuSPARSE uses the opposite alignment, "LEFT_RIGHT".

  Returns:
    A Tensor containing diagonals of `input`. Has the same type as `input`.

  Raises:
    InvalidArgumentError: When `k` is out of bound or when `k[0]>k[1:]`.
  """
  # Special case to sidestep the tf.constant conversion error:
  # TypeError: Expected bool, got 0 of type 'int' instead.
  if getattr(input, "dtype", None) == "bool":
    padding_value = bool(padding_value)

  return gen_array_ops.matrix_diag_part_v3(
      input=input, k=k, padding_value=padding_value, align=align, name=name)
@tf_export(
    "linalg.tensor_diag_part", v1=["linalg.tensor_diag_part", "diag_part"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("diag_part")
def tensor_diag_part(
    input,  # pylint:disable=redefined-builtin
    name=None):
  """Returns the diagonal part of the tensor.

  For `input` of rank `2k` with dimensions `[D1, ..., Dk, D1, ..., Dk]`, the
  result is a rank-`k` tensor of shape `[D1, ..., Dk]` where
  `diagonal[i1, ..., ik] = input[i1, ..., ik, i1, ..., ik]`.

  For a rank-2 tensor, `linalg.diag_part` and `linalg.tensor_diag_part`
  produce the same result. For rank 3 and higher, `linalg.diag_part` instead
  extracts the diagonal of each innermost matrix:

  >>> x = [[[[1111,1112],[1121,1122]],
  ...       [[1211,1212],[1221,1222]]],
  ...      [[[2111, 2112], [2121, 2122]],
  ...       [[2211, 2212], [2221, 2222]]]
  ...      ]
  >>> tf.linalg.tensor_diag_part(x)
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[1111, 1212],
         [2121, 2222]], dtype=int32)>
  >>> tf.linalg.diag_part(x).shape
  TensorShape([2, 2, 2])

  Args:
    input: A `Tensor` with rank `2k`.
    name: A name for the operation (optional).

  Returns:
    A Tensor containing diagonals of `input`. Has the same type as `input`,
    and rank `k`.
  """
  return gen_array_ops.diag_part(input, name=name)
@tf_export("linalg.set_diag", v1=["linalg.set_diag", "matrix_set_diag"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("matrix_set_diag")
def matrix_set_diag(
    input,  # pylint:disable=redefined-builtin
    diagonal,
    name="set_diag",
    k=0,
    align="RIGHT_LEFT"):
  """Returns a batched matrix tensor with new batched diagonal values.

  Given `input` and `diagonal`, this operation returns a tensor with the same
  shape and values as `input`, except for the specified diagonals of the
  innermost matrices, which are overwritten by the values in `diagonal`.

  `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
  `k[0] == k[1]`, `diagonal` has `r` dimensions
  `[I, J, ..., L, max_diag_len]`; otherwise it has `r+1` dimensions
  `[I, J, ..., L, num_diags, max_diag_len]`, where
  `num_diags = k[1] - k[0] + 1` is the number of diagonals and
  `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` is the longest
  diagonal in the range `[k[0], k[1]]`. Diagonals in `diagonal` shorter than
  `max_diag_len` are positioned within their row according to `align`.

  For example:

  ```
  # The main diagonal.
  input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
                     [7, 7, 7, 7],
                     [7, 7, 7, 7]],
                    [[7, 7, 7, 7],
                     [7, 7, 7, 7],
                     [7, 7, 7, 7]]])
  diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
                       [4, 5, 6]])
  tf.matrix_set_diag(input, diagonal)
    ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
          [7, 2, 7, 7],
          [7, 7, 3, 7]],
         [[4, 7, 7, 7],
          [7, 5, 7, 7],
          [7, 7, 6, 7]]]

  # A superdiagonal (per batch).
  tf.matrix_set_diag(input, diagonal, k = 1)
    ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
          [7, 7, 2, 7],
          [7, 7, 7, 3]],
         [[7, 4, 7, 7],
          [7, 7, 5, 7],
          [7, 7, 7, 6]]]
  ```

  Args:
    input: A `Tensor` with rank `k + 1`, where `k >= 1`.
    diagonal: A `Tensor` with rank `k`, when `d_lower == d_upper`, or `k + 1`,
      otherwise. `k >= 1`.
    name: A name for the operation (optional).
    k: Diagonal offset(s). Positive value means superdiagonal, 0 refers to the
      main diagonal, and negative value means subdiagonals. `k` can be a
      single integer (for a single diagonal) or a pair of integers specifying
      the low and high ends of a matrix band. `k[0]` must not be larger than
      `k[1]`.
    align: One of "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", or
      "RIGHT_RIGHT", specifying how superdiagonals and subdiagonals (in that
      order) are aligned when shorter than `max_diag_len`. "RIGHT_LEFT" (the
      packing format LAPACK uses) aligns superdiagonals to the right
      (left-pads the row) and subdiagonals to the left (right-pads the row);
      cuSPARSE uses the opposite alignment, "LEFT_RIGHT".
  """
  return gen_array_ops.matrix_set_diag_v3(
      input=input, diagonal=diagonal, k=k, align=align, name=name)
# pylint: enable=invalid-name
def _constant_if_small(value, shape, dtype, name):
  """Returns a `constant` filled with `value` for small static shapes.

  Returns None when the element count is >= 1000 or cannot be computed
  statically, in which case the caller is expected to build a `fill` op
  instead (keeping serialized GraphDefs small).
  """
  result = None
  try:
    if np.prod(shape) < 1000:
      result = constant(value, shape=shape, dtype=dtype, name=name)
  except (NotImplementedError, TypeError):
    # Happens when shape is a Tensor, list with Tensor elements, etc.
    result = None
  return result
def _tag_zeros_tensor(fun):
  """Tags the result of `fun` by setting the `_is_zeros_tensor` attribute.

  This is useful to compute Hessians of fused ops such as cross_entropy.
  """

  def _tagged(*args, **kwargs):
    result = fun(*args, **kwargs)
    result._is_zeros_tensor = True
    return result

  return tf_decorator.make_decorator(fun, _tagged)
@tf_export("zeros")
@dispatch.add_dispatch_support
@_tag_zeros_tensor
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to zero.

  See also `tf.zeros_like`, `tf.ones`, `tf.fill`, `tf.eye`.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to zero.

  >>> tf.zeros([3, 4], tf.int32)
  <tf.Tensor: shape=(3, 4), dtype=int32, numpy=
  array([[0, 0, 0, 0],
         [0, 0, 0, 0],
         [0, 0, 0, 0]], dtype=int32)>

  Args:
    shape: A `list` of integers, a `tuple` of integers, or
      a 1-D `Tensor` of type `int32`.
    dtype: The DType of an element in the resulting `Tensor`.
    name: Optional string. A name for the operation.

  Returns:
    A `Tensor` with all elements set to zero.
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    # Pick a scalar "zero" value matching `dtype`; tf.constant would reject
    # a plain int 0 for bool/string dtypes.
    if dtype == dtypes.bool:
      zero = False
    elif dtype == dtypes.string:
      zero = ""
    elif dtype.is_quantized:
      # Quantized dtypes need a numpy scalar of the matching numpy dtype.
      zero = np.zeros([]).astype(dtype.as_numpy_dtype)
    else:
      zero = 0
    if not isinstance(shape, ops.Tensor):
      try:
        if not context.executing_eagerly():
          # Create a constant if it won't be very big. Otherwise create a fill
          # op to prevent serialized GraphDefs from becoming too large.
          output = _constant_if_small(zero, shape, dtype, name)
          if output is not None:
            return output
          # Go through tensor shapes to get int64-if-needed semantics
          shape = constant_op._tensor_shape_tensor_conversion_function(
              tensor_shape.TensorShape(shape))
      except (TypeError, ValueError, errors.UnimplementedError):
        # Happens when shape is a list with tensor elements
        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
    if not shape._shape_tuple():
      shape = reshape(shape, [-1])  # Ensure it's a vector
    output = fill(shape, constant(zero, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output
@tf_export(v1=["zeros_like"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def zeros_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor of zeros with the same shape and type as `tensor`.

  See also `tf.zeros`.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to zero. Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  Examples:

  >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.zeros_like(tensor)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[0, 0, 0],
         [0, 0, 0]], dtype=int32)>

  >>> tf.zeros_like(tensor, dtype=tf.float32)
  <tf.Tensor: shape=(2, 3), dtype=float32, numpy=
  array([[0., 0., 0.],
         [0., 0., 0.]], dtype=float32)>

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
      `complex64`, `complex128`, `bool` or `string`. (optional)
    name: A name for the operation (optional).
    optimize: if `True`, attempt to statically determine the shape of `tensor`
      and encode it as a constant. (optional, defaults to `True`)

  Returns:
    A `Tensor` with all elements set to zero.
  """
  # The v1 and v2 endpoints share one implementation.
  return zeros_like_impl(tensor, dtype, name, optimize=optimize)
@tf_export("zeros_like", v1=[])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def zeros_like_v2(
    input,  # pylint: disable=redefined-builtin
    dtype=None,
    name=None):
  """Creates a tensor of zeros with the same shape and type as the input.

  See also `tf.zeros`.

  Given a single tensor or array-like object (`input`), this operation
  returns a tensor of the same type and shape as `input` with all elements
  set to zero. Optionally, you can use `dtype` to specify a new type for the
  returned tensor.

  Examples:

  >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.zeros_like(tensor)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[0, 0, 0],
         [0, 0, 0]], dtype=int32)>

  >>> tf.zeros_like(tensor, dtype=tf.float32)
  <tf.Tensor: shape=(2, 3), dtype=float32, numpy=
  array([[0., 0., 0.],
         [0., 0., 0.]], dtype=float32)>

  >>> tf.zeros_like([[1, 2, 3], [4, 5, 6]])
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[0, 0, 0],
         [0, 0, 0]], dtype=int32)>

  Args:
    input: A `Tensor` or array-like object.
    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
      `complex64`, `complex128`, `bool` or `string` (optional).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to zero.
  """
  # The v1 and v2 endpoints share one implementation; v2 has no `optimize`
  # knob and always allows the static-shape fast path.
  return zeros_like_impl(input, dtype, name, optimize=True)
@_tag_zeros_tensor
def zeros_like_impl(tensor, dtype, name, optimize=True):
  """Internal implementation for the v1/v2 zeros_like API calls."""
  with ops.name_scope(name, "zeros_like", [tensor]) as name:
    if not tensor_util.is_tf_type(tensor):
      tensor = ops.convert_to_tensor(tensor, name="tensor")
    # Note: deliberately not named `tensor_shape` to avoid shadowing the
    # module of the same name.
    in_shape = tensor.shape
    in_dtype = tensor.dtype

    if context.executing_eagerly():
      if dtype is None or dtype == in_dtype:
        return gen_array_ops.zeros_like(tensor, name=name)
      return zeros(
          shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)

    # For now, variant types must be created via zeros_like; as we need to
    # pass the input variant object to the proper zeros callback.
    if optimize and in_shape.is_fully_defined() and in_dtype != dtypes.variant:
      # We can produce a zeros tensor independent of the value of 'tensor',
      # since the shape is known statically.
      return zeros(in_shape, dtype=dtype or in_dtype, name=name)

    if dtype is not None and dtype != in_dtype and dtype != dtypes.variant:
      return zeros(
          shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
    return gen_array_ops.zeros_like(tensor, name=name)
@tf_export(v1=["ones_like"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def ones_like(tensor, dtype=None, name=None, optimize=True):
  """Creates a tensor of ones with the same shape and type as `tensor`.

  See also `tf.ones`.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to 1. Optionally, you
  can specify a new type (`dtype`) for the returned tensor.

  For example:

  ```python
  tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
  tf.ones_like(tensor)  # [[1, 1, 1], [1, 1, 1]]
  ```

  Args:
    tensor: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
      `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`,
      `complex128` or `bool`.
    name: A name for the operation (optional).
    optimize: if true, attempt to statically determine the shape of 'tensor'
      and encode it as a constant.

  Returns:
    A `Tensor` with all elements set to 1.
  """
  # The v1 and v2 endpoints share one implementation.
  return ones_like_impl(tensor, dtype, name, optimize=optimize)
@tf_export("ones_like", v1=[])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def ones_like_v2(
    input,  # pylint: disable=redefined-builtin
    dtype=None,
    name=None):
  """Creates a tensor of all ones that has the same shape as the input.

  See also `tf.ones`.

  Given a single tensor (`tensor`), this operation returns a tensor of the
  same type and shape as `tensor` with all elements set to 1. Optionally,
  you can use `dtype` to specify a new type for the returned tensor.

  For example:

  >>> tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.ones_like(tensor)
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 1, 1],
         [1, 1, 1]], dtype=int32)>

  Args:
    input: A `Tensor`.
    dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
      `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
      `complex64`, `complex128`, `bool` or `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with all elements set to one.
  """
  # The v1 and v2 endpoints share one implementation; v2 has no `optimize`
  # knob and always allows the static-shape fast path.
  return ones_like_impl(input, dtype, name, optimize=True)
def ones_like_impl(tensor, dtype, name, optimize=True):
  """Internal implementation for the v1/v2 ones_like API calls."""
  with ops.name_scope(name, "ones_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    out_dtype = tensor.dtype if dtype is None else dtype
    result = ones(
        shape_internal(tensor, optimize=optimize), dtype=out_dtype, name=name)
    if not context.executing_eagerly():
      # Propagate any statically-known shape onto the result.
      result.set_shape(tensor.get_shape())
    return result
@tf_export("ones")
@dispatch.add_dispatch_support
def ones(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor with all elements set to one (1).

  See also `tf.ones_like`, `tf.zeros`, `tf.fill`, `tf.eye`.

  This operation returns a tensor of type `dtype` with shape `shape` and
  all elements set to one.

  >>> tf.ones([3, 4], tf.int32)
  <tf.Tensor: shape=(3, 4), dtype=int32, numpy=
  array([[1, 1, 1, 1],
         [1, 1, 1, 1],
         [1, 1, 1, 1]], dtype=int32)>

  Args:
    shape: A `list` of integers, a `tuple` of integers, or
      a 1-D `Tensor` of type `int32`.
    dtype: Optional DType of an element in the resulting `Tensor`. Default is
      `tf.float32`.
    name: Optional string. A name for the operation.

  Returns:
    A `Tensor` with all elements set to one (1).
  """
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "ones", [shape]) as name:
    # Pick a scalar "one" value matching `dtype`; tf.constant would reject
    # a plain int 1 for a bool dtype.
    if dtype == dtypes.bool:
      one = True
    elif dtype.is_quantized:
      # Quantized dtypes need a numpy scalar of the matching numpy dtype.
      one = np.ones([]).astype(dtype.as_numpy_dtype)
    else:
      one = 1
    if not isinstance(shape, ops.Tensor):
      try:
        if not context.executing_eagerly():
          # Create a constant if it won't be very big. Otherwise create a fill
          # op to prevent serialized GraphDefs from becoming too large.
          output = _constant_if_small(one, shape, dtype, name)
          if output is not None:
            return output
          # Go through tensor shapes to get int64-if-needed semantics
          shape = constant_op._tensor_shape_tensor_conversion_function(
              tensor_shape.TensorShape(shape))
      except (TypeError, ValueError, errors.UnimplementedError):
        # Happens when shape is a list with tensor elements. Also catches
        # errors.UnimplementedError for consistency with `zeros`, which falls
        # back to convert_to_tensor for the same inputs; previously such
        # shapes crashed `ones` while `zeros` handled them.
        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
    if not shape._shape_tuple():
      shape = reshape(shape, [-1])  # Ensure it's a vector
    output = fill(shape, constant(one, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output
@tf_export(v1=["placeholder"])
def placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a tensor that will be always fed.

  **Important**: This tensor will produce an error if evaluated. Its value
  must be fed using the `feed_dict` optional argument to `Session.run()`,
  `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.compat.v1.placeholder(tf.float32, shape=(1024, 1024))
  y = tf.matmul(x, x)

  with tf.compat.v1.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.

    rand_array = np.random.rand(1024, 1024)
    print(sess.run(y, feed_dict={x: rand_array}))  # Will succeed.
  ```

  Args:
    dtype: The type of elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a tensor of any shape.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that may be used as a handle for feeding a value, but not
    evaluated directly.

  Raises:
    RuntimeError: if eager execution is enabled

  @compatibility(TF2)
  This API is not compatible with eager execution and `tf.function`. In TF2,
  pass tensors directly into ops and layers; to set up inputs explicitly, use
  `tf.keras.Input` or `tf.function` arguments instead. See the [migration
  guide](https://www.tensorflow.org/guide/migrate#1_replace_v1sessionrun_calls)
  on replacing `Session.run` calls, and [Better performance with
  tf.function](https://www.tensorflow.org/guide/function).
  @end_compatibility
  """
  # Placeholders only make sense when building a graph to be fed later.
  if context.executing_eagerly():
    raise RuntimeError("tf.placeholder() is not compatible with "
                       "eager execution.")

  return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
@tf_export(v1=["placeholder_with_default"])
def placeholder_with_default(input, shape, name=None):  # pylint: disable=redefined-builtin
  """A placeholder op that passes through `input` when its output is not fed.

  @compatibility(TF2)
  This API is strongly discouraged for use with eager execution and
  `tf.function`. Its primary use is testing computation wrapped within a
  `tf.function` where the input tensors might not have statically known
  fully-defined shapes. The same can be achieved by creating a
  [concrete function](
  https://www.tensorflow.org/guide/function#obtaining_concrete_functions)
  from the `tf.function` with a `tf.TensorSpec` input which has partially
  defined shapes. For example, the code

  >>> @tf.function
  ... def f():
  ...   x = tf.compat.v1.placeholder_with_default(
  ...       tf.constant([[1., 2., 3.], [4., 5., 6.]]), [None, 3])
  ...   y = tf.constant([[1.],[2.], [3.]])
  ...   z = tf.matmul(x, y)
  ...   assert z.shape[0] == None
  ...   assert z.shape[1] == 1

  >>> f()

  can easily be replaced by

  >>> @tf.function
  ... def f(x):
  ...   y = tf.constant([[1.],[2.], [3.]])
  ...   z = tf.matmul(x, y)
  ...   assert z.shape[0] == None
  ...   assert z.shape[1] == 1

  >>> g = f.get_concrete_function(tf.TensorSpec([None, 3]))

  You can learn more about `tf.function` at [Better
  performance with tf.function](https://www.tensorflow.org/guide/function).
  @end_compatibility

  Args:
    input: A `Tensor`. The default value to produce when output is not fed.
    shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape
      of the tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Thin wrapper: the generated op implements the pass-through semantics.
  return gen_array_ops.placeholder_with_default(input, shape, name=name)
@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
@deprecation.deprecated_endpoints("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a sparse tensor that will be always fed.

  **Important**: This sparse tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.

  For example:

  ```python
  x = tf.compat.v1.sparse.placeholder(tf.float32)
  y = tf.sparse.reduce_sum(x)

  with tf.compat.v1.Session() as sess:
    print(sess.run(y))  # ERROR: will fail because x was not fed.

    indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
    values = np.array([1.0, 2.0], dtype=np.float32)
    shape = np.array([7, 9, 2], dtype=np.int64)
    print(sess.run(y, feed_dict={
      x: tf.compat.v1.SparseTensorValue(indices, values, shape)}))  # Will
      succeed.
    print(sess.run(y, feed_dict={
      x: (indices, values, shape)}))  # Will succeed.

    sp = tf.sparse.SparseTensor(indices=indices, values=values,
                                dense_shape=shape)
    sp_value = sp.eval(session=sess)
    print(sess.run(y, feed_dict={x: sp_value}))  # Will succeed.
  ```

  @compatibility{eager} Placeholders are not compatible with eager execution.

  Args:
    dtype: The type of `values` elements in the tensor to be fed.
    shape: The shape of the tensor to be fed (optional). If the shape is not
      specified, you can feed a sparse tensor of any shape.
    name: A name for prefixing the operations (optional).

  Returns:
    A `SparseTensor` that may be used as a handle for feeding a value, but not
    evaluated directly.

  Raises:
    RuntimeError: if eager execution is enabled
  """
  if context.executing_eagerly():
    raise RuntimeError("`sparse_placeholder` is not compatible with "
                       "eager execution.")

  # Name sub-placeholders under `name` so they group nicely in the graph.
  shape_name = (name + "/shape") if name is not None else None
  default_shape_name = (name + "/shape_default") if name is not None else None
  if shape is None:
    # Unknown rank: the dense shape itself is a fully dynamic placeholder.
    rank = None
    dense_shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
    dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)
  else:
    if isinstance(shape, ops.Tensor):
      # Rank comes from the static length of the shape tensor.
      rank = shape.get_shape()[0]
      dense_shape_default = tensor_util.constant_value_as_shape(shape)
    else:
      rank = len(shape)
      # determine the shape, to override the `.shape` property of the
      # `SparseTensor` (-1 entries mean "unknown dimension")
      dense_shape_default = tensor_shape.TensorShape(
          tuple(None if dim == -1 else dim for dim in shape))
      # Normalize to a concrete int64 tensor: None -> -1 sentinel.
      shape = tuple(tensor_shape.dimension_value(dim) for dim in shape)
      shape = tuple(-1 if dim is None else dim for dim in shape)
      shape = ops.convert_to_tensor(
          shape, dtype=dtypes.int64, name=default_shape_name)

    # `dense_shape` needs to be feedable (for users that treat this as an
    # actual placeholder). `constant_value_as_shape` sets constants to
    # not-feedable. placeholder_with_default works, but blocks `SparseTensor`
    # from reading the default value back out.
    dense_shape = placeholder_with_default(
        shape, shape=shape.shape, name=shape_name)

  result = sparse_tensor.SparseTensor(
      values=placeholder(
          dtype,
          shape=[None],
          name=(name + "/values") if name is not None else None),
      indices=placeholder(
          dtypes.int64,
          shape=[None, rank],
          name=(name + "/indices") if name is not None else None),
      dense_shape=dense_shape)

  # Now the SparseTensor.shape is a list of `None`s, since it couldn't read the
  # default shape out of the placeholder. Override that
  # shape to be the value determined here, so partial shapes can be
  # propagated.
  result._dense_shape_default = dense_shape_default
  return result
# pylint: enable=redefined-outer-name
@tf_export("pad", v1=[])
@dispatch.add_dispatch_support
def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
  """Pads a tensor.

  This operation pads a `tensor` according to the `paddings` you specify.
  `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
  `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
  many values to add before the contents of `tensor` in that dimension, and
  `paddings[D, 1]` indicates how many values to add after the contents of
  `tensor` in that dimension. If `mode` is "REFLECT" then both
  `paddings[D, 0]` and `paddings[D, 1]` must be no greater than
  `tensor.dim_size(D) - 1`. If `mode` is "SYMMETRIC" then both
  `paddings[D, 0]` and `paddings[D, 1]` must be no greater than
  `tensor.dim_size(D)`.

  The padded size of each dimension D of the output is:

  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`

  For example:

  ```python
  t = tf.constant([[1, 2, 3], [4, 5, 6]])
  paddings = tf.constant([[1, 1,], [2, 2]])
  # 'constant_values' is 0.
  # rank of 't' is 2.
  tf.pad(t, paddings, "CONSTANT")  # [[0, 0, 0, 0, 0, 0, 0],
                                   #  [0, 0, 1, 2, 3, 0, 0],
                                   #  [0, 0, 4, 5, 6, 0, 0],
                                   #  [0, 0, 0, 0, 0, 0, 0]]

  tf.pad(t, paddings, "REFLECT")  # [[6, 5, 4, 5, 6, 5, 4],
                                  #  [3, 2, 1, 2, 3, 2, 1],
                                  #  [6, 5, 4, 5, 6, 5, 4],
                                  #  [3, 2, 1, 2, 3, 2, 1]]

  tf.pad(t, paddings, "SYMMETRIC")  # [[2, 1, 1, 2, 3, 3, 2],
                                    #  [2, 1, 1, 2, 3, 3, 2],
                                    #  [5, 4, 4, 5, 6, 6, 5],
                                    #  [5, 4, 4, 5, 6, 6, 5]]
  ```

  Args:
    tensor: A `Tensor`.
    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
    constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
      same type as `tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.

  Raises:
    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
  """
  # The v1 `pad` carries the actual implementation; only the position of
  # `constant_values` and `name` differs between the two endpoints.
  return pad(tensor, paddings, mode=mode, name=name,
             constant_values=constant_values)
@tf_export(v1=["pad"])
@dispatch.add_dispatch_support
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0):  # pylint: disable=invalid-name
  """Pads a tensor.

  This operation pads a `tensor` according to the `paddings` you specify.
  `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
  `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
  many values to add before the contents of `tensor` in that dimension, and
  `paddings[D, 1]` indicates how many values to add after the contents of
  `tensor` in that dimension. If `mode` is "REFLECT" then both
  `paddings[D, 0]` and `paddings[D, 1]` must be no greater than
  `tensor.dim_size(D) - 1`. If `mode` is "SYMMETRIC" then both
  `paddings[D, 0]` and `paddings[D, 1]` must be no greater than
  `tensor.dim_size(D)`.

  The padded size of each dimension D of the output is:

  `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`

  For example:

  ```python
  t = tf.constant([[1, 2, 3], [4, 5, 6]])
  paddings = tf.constant([[1, 1,], [2, 2]])
  # 'constant_values' is 0.
  # rank of 't' is 2.
  tf.pad(t, paddings, "CONSTANT")  # [[0, 0, 0, 0, 0, 0, 0],
                                   #  [0, 0, 1, 2, 3, 0, 0],
                                   #  [0, 0, 4, 5, 6, 0, 0],
                                   #  [0, 0, 0, 0, 0, 0, 0]]

  tf.pad(t, paddings, "REFLECT")  # [[6, 5, 4, 5, 6, 5, 4],
                                  #  [3, 2, 1, 2, 3, 2, 1],
                                  #  [6, 5, 4, 5, 6, 5, 4],
                                  #  [3, 2, 1, 2, 3, 2, 1]]

  tf.pad(t, paddings, "SYMMETRIC")  # [[2, 1, 1, 2, 3, 3, 2],
                                    #  [2, 1, 1, 2, 3, 3, 2],
                                    #  [5, 4, 4, 5, 6, 6, 5],
                                    #  [5, 4, 4, 5, 6, 6, 5]]
  ```

  Args:
    tensor: A `Tensor`.
    paddings: A `Tensor` of type `int32`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
    name: A name for the operation (optional).
    constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
      same type as `tensor`.

  Returns:
    A `Tensor`. Has the same type as `tensor`.

  Raises:
    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
  """
  # Convert lower/mixed case to upper for NumPy compatibility
  # NumPy uses all lower-case modes.
  mode = mode.upper()
  if mode == "CONSTANT":
    # TODO(rjryan): Once the forward compatibility period (3 weeks) have passed
    # remove the "Pad" fallback here.
    # Use the simpler "Pad" op when padding with the default zero value.
    if not tensor_util.is_tf_type(constant_values) and constant_values == 0:
      result = gen_array_ops.pad(tensor, paddings, name=name)
    else:
      result = gen_array_ops.pad_v2(
          tensor, paddings, constant_values, name=name)
  elif mode == "REFLECT":
    result = gen_array_ops.mirror_pad(
        tensor, paddings, mode="REFLECT", name=name)
  elif mode == "SYMMETRIC":
    result = gen_array_ops.mirror_pad(
        tensor, paddings, mode="SYMMETRIC", name=name)
  else:
    raise ValueError("Unknown padding mode: %s" % mode)

  # Restore shape information where possible.
  if not context.executing_eagerly():
    paddings_constant = _get_paddings_constant(paddings)
    # If `tensor` was not already a Tensor, read the converted tensor's shape
    # back off the op's first input.
    input_shape = (
        tensor_shape.TensorShape(tensor.shape)
        if isinstance(tensor, ops.Tensor) else result.op.inputs[0].shape)
    if (input_shape.ndims is not None and
        not result.shape.is_fully_defined() and paddings_constant is not None):
      new_shape = []
      for padding, dim in zip(paddings_constant, input_shape.as_list()):
        # An output dimension is only known when the input dim and both pad
        # amounts for that dim are statically known.
        if padding is None or dim is None or any((x is None for x in padding)):
          new_shape.append(None)
        else:
          new_shape.append(sum(padding) + dim)
      result.set_shape(new_shape)

  return result
def _get_paddings_constant(paddings):
  """Helper to get the constant values of the paddings arg to pad().

  Used under V1 graph mode to facilitate computation of the shape of the
  output tensor of `pad()`.

  Args:
    paddings: The same paddings arg as passed to pad(). Can be a Tensor, or
      a nested list or tuple of Tensor and/or numbers.

  Returns:
    A nested list of numbers or `None`, in which `None` indicates unknown
    padding size.
  """
  if isinstance(paddings, ops.Tensor):
    # Partial constant folding: unknown entries come back as None.
    return tensor_util.constant_value(paddings, partial=True)
  if isinstance(paddings, (list, tuple)):
    # Recurse into nested structures element by element.
    return [_get_paddings_constant(element) for element in paddings]
  # Plain numbers pass through untouched.
  return paddings
@tf_export("meshgrid")
@dispatch.add_dispatch_support
def meshgrid(*args, **kwargs):
  """Broadcasts parameters for evaluation on an N-D grid.

  Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
  of N-D coordinate arrays for evaluating expressions on an N-D grid.

  Notes:

  `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
  When the `indexing` argument is set to 'xy' (the default), the broadcasting
  instructions for the first two dimensions are swapped.

  Examples:

  Calling `X, Y = meshgrid(x, y)` with the tensors

  ```python
  x = [1, 2, 3]
  y = [4, 5, 6]
  X, Y = tf.meshgrid(x, y)
  # X = [[1, 2, 3],
  #      [1, 2, 3],
  #      [1, 2, 3]]
  # Y = [[4, 4, 4],
  #      [5, 5, 5],
  #      [6, 6, 6]]
  ```

  Args:
    *args: `Tensor`s with rank 1.
    **kwargs:
      - indexing: Either 'xy' or 'ij' (optional, default: 'xy').
      - name: A name for the operation (optional).

  Returns:
    outputs: A list of N `Tensor`s with rank N.

  Raises:
    TypeError: When no keyword arguments (kwargs) are passed.
    ValueError: When indexing keyword argument is not one of `xy` or `ij`.
  """
  indexing = kwargs.pop("indexing", "xy")
  name = kwargs.pop("name", "meshgrid")
  if kwargs:
    bad_key = next(iter(kwargs))
    raise TypeError("'{}' is an invalid keyword argument "
                    "for this function".format(bad_key))
  if indexing not in ("xy", "ij"):
    raise ValueError("indexing parameter must be either 'xy' or 'ij'")

  with ops.name_scope(name, "meshgrid", args) as name:
    ndim = len(args)
    if not ndim:
      return []
    unit_shape = (1,) * ndim

    # Reshape each input so its values run along its own axis (all other
    # axes have size 1), preparing for broadcasting.
    output = [
        reshape(stack(arg), unit_shape[:i] + (-1,) + unit_shape[i + 1:])
        for i, arg in enumerate(args)
    ]

    # Per-input sizes used to broadcast each tensor to the full grid.
    shapes = [size(arg) for arg in args]
    output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype

    if indexing == "xy" and ndim > 1:
      # Cartesian convention: the first two axes trade places.
      output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
      output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
      shapes[0], shapes[1] = shapes[1], shapes[0]

    # TODO(nolivia): improve performance with a broadcast
    mult_fact = ones(shapes, output_dtype)
    return [out * mult_fact for out in output]
# Sentinel markers used in strided-slice dimension specs: an inserted axis
# and a removed (shrunk) axis, respectively.
# NOTE(review): their consumers are elsewhere in this file — confirm against
# the strided-slice helpers before relying on the exact semantics.
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
unknown = None # Document what None means here.
use_full_range = None # Document other use of None.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
stride = spec.step
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
  # `multiples` is modeled as a `TensorShape` because (i) it is a vector of
  # non-negative integers, and (ii) that representation naturally handles
  # partially-known values.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  # Each output dimension is the input dimension divided by its multiple.
  divided = [
      dim // multiple
      for dim, multiple in zip(input_shape.dims, multiples.dims)
  ]
  return [tensor_shape.TensorShape(divided)]
@tf_export("edit_distance")
@dispatch.add_dispatch_support
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
  """Computes the Levenshtein distance between sequences.

  This operation takes variable-length sequences (`hypothesis` and `truth`),
  each provided as a `SparseTensor`, and computes the Levenshtein distance.
  You can normalize the edit distance by length of `truth` by setting
  `normalize` to true.

  For example:

  Given the following input,
  * `hypothesis` is a `tf.SparseTensor` of shape `[2, 1, 1]`
  * `truth` is a `tf.SparseTensor` of shape `[2, 2, 2]`

  >>> hypothesis = tf.SparseTensor(
  ...   [[0, 0, 0],
  ...    [1, 0, 0]],
  ...   ["a", "b"],
  ...   (2, 1, 1))
  >>> truth = tf.SparseTensor(
  ...   [[0, 1, 0],
  ...    [1, 0, 0],
  ...    [1, 0, 1],
  ...    [1, 1, 0]],
  ...   ["a", "b", "c", "a"],
  ...   (2, 2, 2))
  >>> tf.edit_distance(hypothesis, truth, normalize=True)
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[inf, 1. ],
         [0.5, 1. ]], dtype=float32)>

  The operation returns a dense Tensor of shape `[2, 2]` with
  edit distances normalized by `truth` lengths.

  **Note**: It is possible to calculate edit distance between two
  sparse tensors with variable-length values. However, attempting to create
  them while eager execution is enabled will result in a `ValueError`.

  For the following  inputs,

  ```python
  # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
  #   (0,0) = ["a"]
  #   (1,0) = ["b"]
  hypothesis = tf.sparse.SparseTensor(
      [[0, 0, 0],
       [1, 0, 0]],
      ["a", "b"],
      (2, 1, 1))

  # 'truth' is a tensor of shape `[2, 2]` with variable-length values:
  #   (0,0) = []
  #   (0,1) = ["a"]
  #   (1,0) = ["b", "c"]
  #   (1,1) = ["a"]
  truth = tf.sparse.SparseTensor(
      [[0, 1, 0],
       [1, 0, 0],
       [1, 0, 1],
       [1, 1, 0]],
      ["a", "b", "c", "a"],
      (2, 2, 2))

  normalize = True

  # The output would be a dense Tensor of shape `(2,)`, with edit distances
  normalized by 'truth' lengths.
  # output => array([0., 0.5], dtype=float32)
  ```

  Args:
    hypothesis: A `SparseTensor` containing hypothesis sequences.
    truth: A `SparseTensor` containing truth sequences.
    normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
      length of `truth.`
    name: A name for the operation (optional).

  Returns:
    A dense `Tensor` with rank `R - 1`, where R is the rank of the
    `SparseTensor` inputs `hypothesis` and `truth`.

  Raises:
    TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
  """
  # Validate both arguments up front so the error names the offending one.
  for label, value in (("Hypothesis", hypothesis), ("Truth", truth)):
    if not isinstance(
        value,
        (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      raise TypeError("%s must be a SparseTensor." % label)

  return gen_array_ops.edit_distance(
      hypothesis.indices,
      hypothesis.values,
      hypothesis.dense_shape,
      truth.indices,
      truth.values,
      truth.dense_shape,
      normalize=normalize,
      name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxArgs op."""
  # Replay the quantization attributes recorded on the forward op.
  attrs = {
      key: op.get_attr(key)
      for key in ("min", "max", "num_bits", "narrow_range")
  }
  return fake_quant_with_min_max_args_gradient(grad, op.inputs[0], **attrs)
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxVars op."""
  # Forward the first three inputs of the forward op positionally, along
  # with its recorded quantization attributes.
  first, second, third = op.inputs[0], op.inputs[1], op.inputs[2]
  return fake_quant_with_min_max_vars_gradient(
      grad,
      first,
      second,
      third,
      num_bits=op.get_attr("num_bits"),
      narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
  # Forward the first three inputs of the forward op positionally, along
  # with its recorded quantization attributes.
  first, second, third = op.inputs[0], op.inputs[1], op.inputs[2]
  return fake_quant_with_min_max_vars_per_channel_gradient(
      grad,
      first,
      second,
      third,
      num_bits=op.get_attr("num_bits"),
      narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("QuantizeAndDequantizeV4")
def _QuantizeAndDequantizeV4Grad(op, grad):
  """Gradient for QuantizeAndDequantizeV4 op."""
  # Forward the op's first three inputs positionally plus its axis attribute.
  forward_inputs = op.inputs
  return quantize_and_dequantize_v4_grad(
      grad,
      forward_inputs[0],
      forward_inputs[1],
      forward_inputs[2],
      axis=op.get_attr("axis"))
@ops.RegisterGradient("QuantizeAndDequantizeV4Grad")
def _QuantizeAndDequantizeV4GradGrad(op, grad):
  """Gradient for QuantizeAndDequantizeV4Grad op."""
  # Delegates to the first-order gradient function, which handles the same
  # input/attr layout.
  return _QuantizeAndDequantizeV4Grad(op, grad)
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
                                     block_shape,
                                     base_paddings=None,
                                     name=None):
  """Calculate padding required to make block_shape divide input_shape.

  This function can be used to calculate a suitable paddings argument for use
  with space_to_batch_nd and batch_to_space_nd.

  Args:
    input_shape: int32 Tensor of shape [N].
    block_shape: int32 Tensor of shape [N].
    base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
      amount of padding to use. All elements must be >= 0. If not specified,
      defaults to 0.
    name: string. Optional name prefix.

  Returns:
    (paddings, crops), where:

    `paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
    satisfying:

        paddings[i, 0] = base_paddings[i, 0].
        0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
        (input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0

        crops[i, 0] = 0
        crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]

  Raises: ValueError if called with incompatible shapes.
  """
  with ops.name_scope(name, "required_space_to_batch_paddings",
                      [input_shape, block_shape]):
    input_shape = ops.convert_to_tensor(
        input_shape, dtype=dtypes.int32, name="input_shape")
    block_shape = ops.convert_to_tensor(
        block_shape, dtype=dtypes.int32, name="block_shape")
    # The number of block dimensions must be statically known: the results
    # are assembled below with a Python-level loop over them.
    block_shape.get_shape().assert_is_fully_defined()
    block_shape.get_shape().assert_has_rank(1)
    num_block_dims = block_shape.get_shape().dims[0].value
    if num_block_dims == 0:
      # Degenerate case: nothing to pad or crop.
      return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
    input_shape.get_shape().assert_is_compatible_with([num_block_dims])
    if base_paddings is not None:
      base_paddings = ops.convert_to_tensor(
          base_paddings, dtype=dtypes.int32, name="base_paddings")
      base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
    else:
      # No minimum padding requested; default to all zeros.
      base_paddings = zeros([num_block_dims, 2], dtypes.int32)
    # When all three inputs are statically known, substitute the numpy
    # constants so the arithmetic below folds at graph-construction time.
    const_block_shape = tensor_util.constant_value(block_shape)
    const_input_shape = tensor_util.constant_value(input_shape)
    const_base_paddings = tensor_util.constant_value(base_paddings)
    if (const_block_shape is not None and const_input_shape is not None and
        const_base_paddings is not None):
      block_shape = const_block_shape
      input_shape = const_input_shape
      base_paddings = const_base_paddings
    # Use same expression for both constant and non-constant case.
    pad_start = base_paddings[:, 0]
    orig_pad_end = base_paddings[:, 1]
    full_input_shape = input_shape + pad_start + orig_pad_end
    # Extra end padding that makes each padded dimension divisible by the
    # corresponding block size; the outer % keeps it in [0, block_shape).
    pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
    pad_end = orig_pad_end + pad_end_extra
    result_paddings = stack(
        [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
        name="paddings")
    result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],
                         name="crops")
    return result_paddings, result_crops
@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("space_to_batch")
def space_to_batch(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    paddings,
    block_size=None,
    name=None,
    block_shape=None):  # pylint: disable=redefined-builtin
  # Accept the deprecated `block_shape` alias and fold it into `block_size`.
  block_size = deprecation.deprecated_argument_lookup(
      "block_shape", block_shape, "block_size", block_size)
  # Delegate to the N-D op with the same factor on both spatial dimensions.
  result = space_to_batch_nd(
      input,
      paddings=paddings,
      block_shape=np.array([block_size] * 2, dtype=np.int64),
      name=name)
  # This 2-D convenience wrapper always yields a rank-4 result.
  result.set_shape(result.get_shape().with_rank(4))
  return result


space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
@dispatch.add_dispatch_support
def space_to_batch_v2(input, block_shape, paddings, name=None):  # pylint: disable=redefined-builtin
  # Thin alias over the N-D implementation.
  return space_to_batch_nd(
      input, block_shape=block_shape, paddings=paddings, name=name)


space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
  # Thin alias over the generated op; note the V1 argument order places
  # `name` before `data_format`.
  return gen_array_ops.space_to_depth(
      input, block_size, data_format=data_format, name=name)


space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("nn.space_to_depth", v1=[])
@dispatch.add_dispatch_support
def space_to_depth_v2(input, block_size, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
  # Thin alias over the generated op with the V2 argument order.
  return gen_array_ops.space_to_depth(
      input, block_size, data_format=data_format, name=name)


space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
  # Thin alias over the generated op; note the V1 argument order places
  # `name` before `data_format`.
  return gen_array_ops.depth_to_space(
      input, block_size, data_format=data_format, name=name)


depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("nn.depth_to_space", v1=[])
@dispatch.add_dispatch_support
def depth_to_space_v2(input, block_size, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
  # Thin alias over the generated op with the V2 argument order.
  return gen_array_ops.depth_to_space(
      input, block_size, data_format=data_format, name=name)


depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export(v1=["batch_to_space"])
@dispatch.add_dispatch_support
def batch_to_space(input, crops, block_size, name=None, block_shape=None):  # pylint: disable=redefined-builtin,missing-docstring
  # Accept the deprecated `block_shape` alias and fold it into `block_size`.
  block_size = deprecation.deprecated_argument_lookup(
      "block_shape", block_shape, "block_size", block_size)
  # Delegate to the N-D op with the same factor on both spatial dimensions.
  result = batch_to_space_nd(
      input,
      crops=crops,
      block_shape=np.array([block_size] * 2, dtype=np.int64),
      name=name)
  # This 2-D convenience wrapper always yields a rank-4 result.
  result.set_shape(result.get_shape().with_rank(4))
  return result


batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
@tf_export("batch_to_space", v1=[])
@dispatch.add_dispatch_support
def batch_to_space_v2(input, block_shape, crops, name=None):  # pylint: disable=redefined-builtin
  """BatchToSpace for N-D tensors of type T.

  This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
  shape `block_shape + [batch]`, interleaves these blocks back into the grid
  defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the
  same rank as the input.  The spatial dimensions of this intermediate result
  are then optionally cropped according to `crops` to produce the output.  This
  is the reverse of SpaceToBatch (see `tf.space_to_batch`).

  Args:
    input: A N-D `Tensor` with shape `input_shape = [batch] + spatial_shape +
      remaining_shape`, where `spatial_shape` has M dimensions.
    block_shape: A 1-D `Tensor` with shape [M]. Must be one of the following
      types: `int32`, `int64`. All values must be >= 1. For backwards
      compatibility with TF 1.0, this parameter may be an int, in which case it
      is converted to
      `numpy.array([block_shape, block_shape],
      dtype=numpy.int64)`.
    crops: A  2-D `Tensor` with shape `[M, 2]`. Must be one of the
      following types: `int32`, `int64`. All values must be >= 0.
      `crops[i] = [crop_start, crop_end]` specifies the amount to crop from
      input dimension `i + 1`, which corresponds to spatial dimension `i`.
      It is required that
      `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
      This operation is equivalent to the following steps:
      1. Reshape `input` to `reshaped` of shape: [block_shape[0], ...,
        block_shape[M-1], batch / prod(block_shape), input_shape[1], ...,
        input_shape[N-1]]
      2. Permute dimensions of `reshaped` to produce `permuted` of shape
        [batch / prod(block_shape),  input_shape[1], block_shape[0], ...,
        input_shape[M], block_shape[M-1], input_shape[M+1],
        ..., input_shape[N-1]]
      3. Reshape `permuted` to produce `reshaped_permuted` of shape
        [batch / prod(block_shape), input_shape[1] * block_shape[0], ...,
        input_shape[M] * block_shape[M-1], input_shape[M+1], ...,
        input_shape[N-1]]
      4. Crop the start and end of dimensions `[1, ..., M]` of
        `reshaped_permuted` according to `crops` to produce the output
        of shape:
        [batch / prod(block_shape),  input_shape[1] *
          block_shape[0] - crops[0,0] - crops[0,1], ..., input_shape[M] *
          block_shape[M-1] - crops[M-1,0] - crops[M-1,1],  input_shape[M+1],
          ..., input_shape[N-1]]
    name: A name for the operation (optional).

  Examples:

  1. For the following input of shape `[4, 1, 1, 1]`,
     `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

     ```python
     [[[[1]]],
      [[[2]]],
      [[[3]]],
      [[[4]]]]
     ```

     The output tensor has shape `[1, 2, 2, 1]` and value:

     ```
     x = [[[[1], [2]],
           [[3], [4]]]]
     ```

  2. For the following input of shape `[4, 1, 1, 3]`,
     `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

     ```python
     [[[1,  2,   3]],
      [[4,  5,   6]],
      [[7,  8,   9]],
      [[10, 11, 12]]]
     ```

     The output tensor has shape `[1, 2, 2, 3]` and value:

     ```python
     x = [[[[1, 2, 3], [4,  5,  6 ]],
           [[7, 8, 9], [10, 11, 12]]]]
     ```

  3. For the following
     input of shape `[4, 2, 2, 1]`,
     `block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:

     ```python
     x = [[[[1], [3]], [[ 9], [11]]],
          [[[2], [4]], [[10], [12]]],
          [[[5], [7]], [[13], [15]]],
          [[[6], [8]], [[14], [16]]]]
     ```

     The output tensor has shape `[1, 4, 4, 1]` and value:

     ```python
     x = [[[1],  [2],  [ 3], [ 4]],
          [[5],  [6],  [ 7], [ 8]],
          [[9],  [10], [11], [12]],
          [[13], [14], [15], [16]]]
     ```

  4. For the following input of shape
      `[8, 1, 3, 1]`,
      `block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:

      ```python
      x = [[[[0], [ 1], [ 3]]],
           [[[0], [ 9], [11]]],
           [[[0], [ 2], [ 4]]],
           [[[0], [10], [12]]],
           [[[0], [ 5], [ 7]]],
           [[[0], [13], [15]]],
           [[[0], [ 6], [ 8]]],
           [[[0], [14], [16]]]]
      ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ```python
      x = [[[[ 1], [ 2], [ 3], [ 4]],
            [[ 5], [ 6], [ 7], [ 8]]],
           [[[ 9], [10], [11], [12]],
            [[13], [14], [15], [16]]]]
      ```

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # TF 1.0 compatibility: a bare int means the same block factor on both
  # spatial dimensions.
  if isinstance(block_shape, int):
    block_shape = np.array([block_shape] * 2, dtype=np.int64)

  return batch_to_space_nd(
      input=input, block_shape=block_shape, crops=crops, name=name)
@tf_export("one_hot")
@dispatch.add_dispatch_support
def one_hot(indices,
            depth,
            on_value=None,
            off_value=None,
            axis=None,
            dtype=None,
            name=None):
  """Returns a one-hot tensor.

  See also `tf.fill`, `tf.eye`.

  The locations represented by indices in `indices` take value `on_value`,
  while all other locations take value `off_value`.

  `on_value` and `off_value` must have matching data types. If `dtype` is also
  provided, they must be the same data type as specified by `dtype`.

  If `on_value` is not provided, it will default to the value `1` with type
  `dtype`

  If `off_value` is not provided, it will default to the value `0` with type
  `dtype`

  If the input `indices` is rank `N`, the output will have rank `N+1`. The
  new axis is created at dimension `axis` (default: the new axis is appended
  at the end).

  If `indices` is a scalar the output shape will be a vector of length `depth`

  If `indices` is a vector of length `features`, the output shape will be:

  ```
    features x depth if axis == -1
    depth x features if axis == 0
  ```

  If `indices` is a matrix (batch) with shape `[batch, features]`, the output
  shape will be:

  ```
    batch x features x depth if axis == -1
    batch x depth x features if axis == 1
    depth x batch x features if axis == 0
  ```

  If `indices` is a RaggedTensor, the 'axis' argument must be positive and refer
  to a non-ragged axis. The output will be equivalent to applying 'one_hot' on
  the values of the RaggedTensor, and creating a new RaggedTensor from the
  result.

  If `dtype` is not provided, it will attempt to assume the data type of
  `on_value` or `off_value`, if one or both are passed in. If none of
  `on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
  value `tf.float32`.

  Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
  etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.

  For example:

  ```python
  indices = [0, 1, 2]
  depth = 3
  tf.one_hot(indices, depth)  # output: [3 x 3]
  # [[1., 0., 0.],
  #  [0., 1., 0.],
  #  [0., 0., 1.]]

  indices = [0, 2, -1, 1]
  depth = 3
  tf.one_hot(indices, depth,
             on_value=5.0, off_value=0.0,
             axis=-1)  # output: [4 x 3]
  # [[5.0, 0.0, 0.0],  # one_hot(0)
  #  [0.0, 0.0, 5.0],  # one_hot(2)
  #  [0.0, 0.0, 0.0],  # one_hot(-1)
  #  [0.0, 5.0, 0.0]]  # one_hot(1)

  indices = [[0, 2], [1, -1]]
  depth = 3
  tf.one_hot(indices, depth,
             on_value=1.0, off_value=0.0,
             axis=-1)  # output: [2 x 2 x 3]
  # [[[1.0, 0.0, 0.0],   # one_hot(0)
  #   [0.0, 0.0, 1.0]],  # one_hot(2)
  #  [[0.0, 1.0, 0.0],   # one_hot(1)
  #   [0.0, 0.0, 0.0]]]  # one_hot(-1)

  indices = tf.ragged.constant([[0, 1], [2]])
  depth = 3
  tf.one_hot(indices, depth)  # output: [2 x None x 3]
  # [[[1., 0., 0.],
  #   [0., 1., 0.]],
  #  [[0., 0., 1.]]]
  ```

  Args:
    indices: A `Tensor` of indices.
    depth: A scalar defining the depth of the one hot dimension.
    on_value: A scalar defining the value to fill in output when `indices[j]
      = i`. (default: 1)
    off_value: A scalar defining the value to fill in output when `indices[j]
      != i`. (default: 0)
    axis: The axis to fill (default: -1, a new inner-most axis).
    dtype: The data type of the output tensor.
    name: A name for the operation (optional).

  Returns:
    output: The one-hot tensor.

  Raises:
    TypeError: If dtype of either `on_value` or `off_value` don't match `dtype`
    TypeError: If dtype of `on_value` and `off_value` don't match one another
  """
  with ops.name_scope(
      name, "one_hot",
      [indices, depth, on_value, off_value, axis, dtype]) as name:
    has_on = on_value is not None
    has_off = off_value is not None

    # Convert the provided fill values first, letting `dtype` act as a hint.
    if has_on:
      on_value = ops.convert_to_tensor(on_value, dtype_hint=dtype)
    if has_off:
      off_value = ops.convert_to_tensor(off_value, dtype_hint=dtype)

    on_dtype = on_value.dtype.base_dtype if has_on else None
    off_dtype = off_value.dtype.base_dtype if has_off else None

    if has_on or has_off:
      if dtype is not None:
        # An explicit dtype must agree with whichever values were provided.
        if has_on and on_dtype != dtype:
          raise TypeError("dtype {0} of on_value does not match "
                          "dtype parameter {1}".format(on_dtype, dtype))
        if has_off and off_dtype != dtype:
          raise TypeError("dtype {0} of off_value does not match "
                          "dtype parameter {1}".format(off_dtype, dtype))
      else:
        # Infer dtype from whichever value was provided (on_value wins).
        dtype = on_dtype if has_on else off_dtype
    elif dtype is None:
      # Neither value nor dtype was given; fall back to float32.
      dtype = dtypes.float32

    # Supply defaults for whichever fill values are still missing.
    if not has_on:
      on_value = ops.convert_to_tensor(1, dtype, name="on_value")
      on_dtype = dtype
    if not has_off:
      off_value = ops.convert_to_tensor(0, dtype, name="off_value")
      off_dtype = dtype

    if on_dtype != off_dtype:
      raise TypeError("dtype {0} of on_value does not match "
                      "dtype {1} of off_value".format(on_dtype, off_dtype))

    return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
                                 name)
def _all_dimensions(x):
  """Returns a 1D-tensor listing all dimensions in x."""
  # Fast paths: when the rank is statically known, emit a constant instead of
  # building Rank and Range ops.
  if isinstance(x, ops.Tensor):
    ndims = x.get_shape().ndims
    if ndims is not None:
      return constant_op.constant(np.arange(ndims), dtype=dtypes.int32)
  if (isinstance(x, sparse_tensor.SparseTensor) and
      x.dense_shape.get_shape().is_fully_defined()):
    # sparse.dense_shape is 1-D, so its single dimension is the rank of x.
    rank_value = x.dense_shape.get_shape().dims[0].value
    return constant_op.constant(np.arange(rank_value), dtype=dtypes.int32)

  # Otherwise, rely on `range` and `rank` to do the right thing at runtime.
  return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
@dispatch.add_dispatch_support
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
  """Returns a mask tensor representing the first N positions of each cell.

  If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
  dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with

  ```
  mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
  ```

  Examples:

  ```python
  tf.sequence_mask([1, 3, 2], 5)  # [[True, False, False, False, False],
                                  #  [True, True, True, False, False],
                                  #  [True, True, False, False, False]]

  tf.sequence_mask([[1, 3],[2,0]])  # [[[True, False, False],
                                    #   [True, True, True]],
                                    #  [[True, True, False],
                                    #   [False, False, False]]]
  ```

  Args:
    lengths: integer tensor, all its values <= maxlen.
    maxlen: scalar integer tensor, size of last dimension of returned tensor.
      Default is the maximum value in `lengths`.
    dtype: output type of the resulting tensor.
    name: name of the op.

  Returns:
    A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.

  Raises:
    ValueError: if `maxlen` is not a scalar.
  """
  with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
    lengths = ops.convert_to_tensor(lengths)

    if maxlen is None:
      # Default to the largest value in `lengths`, clamped at zero.
      maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
      maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
    else:
      maxlen = ops.convert_to_tensor(maxlen)
      maxlen_ndims = maxlen.get_shape().ndims
      if maxlen_ndims is not None and maxlen_ndims != 0:
        raise ValueError("maxlen must be scalar for sequence_mask")

    # Compare a [0, maxlen) row vector against `lengths` expanded with a
    # trailing unit dimension (e.g. [[1], [3], [2]]); broadcasting turns the
    # comparison into a (len(lengths), maxlen) boolean matrix.
    row_vector = gen_math_ops._range(
        constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), maxlen's dtype is a safe authoritative
    # cast target: whenever maxlen fits into tf.int32, so do the lengths.
    matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
    result = row_vector < matrix

    if dtype is None or result.dtype.is_compatible_with(dtype):
      return result
    return gen_math_ops.cast(result, dtype)
@tf_export(v1=["squeeze"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
  # pylint: disable=redefined-builtin
  """Removes dimensions of size 1 from the shape of a tensor.

  Given a tensor `input`, this operation returns a tensor of the same type with
  all dimensions of size 1 removed. If you don't want to remove all size 1
  dimensions, you can remove specific size 1 dimensions by specifying
  `axis`.

  For example:

  >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  >>> t = tf.ones([1, 2, 1, 3, 1, 1])
  >>> print(tf.shape(tf.squeeze(t)).numpy())
  [2 3]

  Or, to remove specific size 1 dimensions:

  >>> # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  >>> t = tf.ones([1, 2, 1, 3, 1, 1])
  >>> print(tf.shape(tf.squeeze(t, [2, 4])).numpy())
  [1 2 3 1]

  Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
  time, where `N` is the number of elements in the squeezed dimensions.

  Args:
    input: A `Tensor`. The `input` to squeeze.
    axis: An optional list of `ints`. Defaults to `[]`. If specified, only
      squeezes the dimensions listed. The dimension index starts at 0. It is an
      error to squeeze a dimension that is not 1. Must be in the range
      `[-rank(input), rank(input))`. Must be specified if `input` is a
      `RaggedTensor`.
    name: A name for the operation (optional).
    squeeze_dims: Deprecated keyword argument that is now axis.

  Returns:
    A `Tensor`. Has the same type as `input`.
    Contains the same data as `input`, but has one or more dimensions of
    size 1 removed.

  Raises:
    ValueError: When both `squeeze_dims` and `axis` are specified.
  """
  # Fold the deprecated `squeeze_dims` alias into `axis`.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "squeeze_dims",
                                                squeeze_dims)
  # A bare scalar axis is accepted for convenience; normalize it to a list.
  squeeze_axes = [axis] if np.isscalar(axis) else axis
  return gen_array_ops.squeeze(input, squeeze_axes, name)
@tf_export("squeeze", v1=[])
@dispatch.add_dispatch_support
def squeeze_v2(input, axis=None, name=None):
  """Removes dimensions of size 1 from the shape of a tensor.

  Given a tensor `input`, this operation returns a tensor of the same type with
  all dimensions of size 1 removed. If you don't want to remove all size 1
  dimensions, you can remove specific size 1 dimensions by specifying
  `axis`.

  For example:

  ```python
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  tf.shape(tf.squeeze(t))  # [2, 3]
  ```

  Or, to remove specific size 1 dimensions:

  ```python
  # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
  tf.shape(tf.squeeze(t, [2, 4]))  # [1, 2, 3, 1]
  ```

  Unlike the older op `tf.compat.v1.squeeze`, this op does not accept a
  deprecated `squeeze_dims` argument.

  Note: if `input` is a `tf.RaggedTensor`, then this operation takes `O(N)`
  time, where `N` is the number of elements in the squeezed dimensions.

  Args:
    input: A `Tensor`. The `input` to squeeze.
    axis: An optional list of `ints`. Defaults to `[]`. If specified, only
      squeezes the dimensions listed. The dimension index starts at 0. It is an
      error to squeeze a dimension that is not 1. Must be in the range
      `[-rank(input), rank(input))`. Must be specified if `input` is a
      `RaggedTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
    Contains the same data as `input`, but has one or more dimensions of
    size 1 removed.

  Raises:
    ValueError: The input cannot be converted to a tensor, or the specified
      axis cannot be squeezed.
  """
  # pylint: disable=redefined-builtin
  # Delegate to the V1 implementation without the deprecated alias.
  return squeeze(input, axis=axis, name=name)
@tf_export(v1=["where"])
@dispatch.add_dispatch_support
def where(condition, x=None, y=None, name=None):
  """Return the elements, either from `x` or `y`, depending on the `condition`.

  If both `x` and `y` are None, then this operation returns the coordinates of
  true elements of `condition`.  The coordinates are returned in a 2-D tensor
  where the first dimension (rows) represents the number of true elements, and
  the second dimension (columns) represents the coordinates of the true
  elements. Keep in mind, the shape of the output tensor can vary depending on
  how many true values there are in input. Indices are output in row-major
  order.

  If both non-None, `x` and `y` must have the same shape.
  The `condition` tensor must be a scalar if `x` and `y` are scalar.
  If `x` and `y` are tensors of higher rank, then `condition` must be either a
  vector with size matching the first dimension of `x`, or must have the same
  shape as `x`.

  The `condition` tensor acts as a mask that chooses, based on the value at each
  element, whether the corresponding element / row in the output should be taken
  from `x` (if true) or `y` (if false).

  If `condition` is a vector and `x` and `y` are higher rank matrices, then it
  chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
  has the same shape as `x` and `y`, then it chooses which element to copy from
  `x` and `y`.

  Args:
    condition: A `Tensor` of type `bool`
    x: A Tensor which may have the same shape as `condition`. If `condition` is
      rank 1, `x` may have higher rank, but its first dimension must match the
      size of `condition`.
    y: A `tensor` with the same shape and type as `x`.
    name: A name of the operation (optional)

  Returns:
    A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
    Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.

  Raises:
    ValueError: When exactly one of `x` or `y` is non-None.

  @compatibility(TF2)

  This API is compatible with eager execution and `tf.function`. However, this
  is still a legacy API endpoint originally designed for TF1. To migrate to
  fully-native TF2, please replace its usage with `tf.where` instead, which is
  directly backwards compatible with `tf.compat.v1.where`.

  However,`tf.compat.v1.where` is more restrictive than `tf.where`, requiring
  `x` and `y` to have the same shape, and returning a `Tensor` with the same
  type and shape as `x`, `y` (if they are both non-None).

  `tf.where` will accept `x`, `y` that are not the same shape as long as they
  are broadcastable with one another and with `condition`, and will return a
  `Tensor` with shape broadcast from `condition`, `x`, and `y`.

  For example, the following works with `tf.where` but not `tf.compat.v1.where`:

  >>> tf.where([True, False, False, True], [1,2,3,4], [100])
  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([  1, 100, 100,   4],
  dtype=int32)>

  >>> tf.where(True, [1,2,3,4], 100)
  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([1, 2, 3, 4],
  dtype=int32)>

  @end_compatibility
  """
  # Coordinate mode: neither branch value supplied.
  if x is None and y is None:
    with ops.name_scope(name, "Where", [condition]) as name:
      condition = ops.convert_to_tensor(
          condition, preferred_dtype=dtypes.bool, name="condition")
      return gen_array_ops.where(condition=condition, name=name)
  # Selection mode: both branch values supplied.
  if x is not None and y is not None:
    return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
  # Supplying exactly one of x / y is an error.
  raise ValueError("x and y must both be non-None or both be None.")
@tf_export("where", v1=["where_v2"])
@dispatch.add_dispatch_support
def where_v2(condition, x=None, y=None, name=None):
  """Return the elements where `condition` is `True` (multiplexing `x` and `y`).

  This operator has two modes: in one mode both `x` and `y` are provided, in
  another mode neither are provided. `condition` is always expected to be a
  `tf.Tensor` of type `bool`.

  #### Retrieving indices of `True` elements

  If `x` and `y` are not provided (both are None), `tf.where` returns the
  indices of the `True` elements of `condition`, as a 2-D tensor with shape
  `(n, d)` where `n` is the number of `True` elements and `d` is the number
  of dimensions of `condition`. Indices are output in row-major order.

  >>> tf.where([True, False, False, True])
  <tf.Tensor: shape=(2, 1), dtype=int64, numpy=
  array([[0],
         [3]])>

  #### Multiplexing between `x` and `y`

  If `x` and `y` are provided (both are non-None), the output shape is
  broadcast from the shapes of `condition`, `x` and `y`, and each output
  element / row is taken from `x` where `condition` is `True` and from `y`
  where it is `False`.

  >>> tf.where([True, False, False, True], [1,2,3,4], [100,200,300,400])
  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 200, 300, 4],
  dtype=int32)>
  >>> tf.where([True, False, False, True], [1,2,3,4], 100)
  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([ 1, 100, 100, 4],
  dtype=int32)>

  Note that if the gradient of either branch of the tf.where generates a NaN,
  then the gradient of the entire tf.where will be NaN, because the gradient
  calculation combines the two branches for performance reasons. A workaround
  is an inner tf.where that replaces dangerous inputs with safe ones, so the
  untaken branch never produces a NaN gradient, e.g.:

  >>> x = tf.constant(0., dtype=tf.float32)
  >>> with tf.GradientTape() as tape:
  ...   tape.watch(x)
  ...   safe_x = tf.where(tf.equal(x, 0.), 1., x)
  ...   y = tf.where(x < 1., 0., 1. / safe_x)
  >>> print(tape.gradient(y, x))
  tf.Tensor(0.0, shape=(), dtype=float32)

  Args:
    condition: A `tf.Tensor` of type `bool`
    x: If provided, a Tensor which is of the same type as `y`, and has a shape
      broadcastable with `condition` and `y`.
    y: If provided, a Tensor which is of the same type as `x`, and has a shape
      broadcastable with `condition` and `x`.
    name: A name of the operation (optional).

  Returns:
    If `x` and `y` are provided:
      A `Tensor` with the same type as `x` and `y`, and shape that
      is broadcast from `condition`, `x`, and `y`.
    Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`.

  Raises:
    ValueError: When exactly one of `x` or `y` is non-None, or the shapes
      are not all broadcastable.
  """
  if (x is None) != (y is None):
    # Only one of the two branch values was supplied.
    raise ValueError("x and y must both be non-None or both be None.")
  if x is not None:
    # Broadcasting select (select_v2) — unlike the v1 op, the shapes of
    # condition, x and y need only be mutually broadcastable.
    return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)
  # Neither x nor y given: emit coordinates of the true elements.
  with ops.name_scope(name, "Where", [condition]) as name:
    condition = ops.convert_to_tensor(
        condition, preferred_dtype=dtypes.bool, name="condition")
    return gen_array_ops.where(condition=condition, name=name)
# pylint: disable=redefined-builtin
@tf_export(v1=["reverse_sequence"])
@deprecation.deprecated_args(None,
                             "seq_dim is deprecated, use seq_axis instead",
                             "seq_dim")
@deprecation.deprecated_args(None,
                             "batch_dim is deprecated, use batch_axis instead",
                             "batch_dim")
def reverse_sequence(input,
                     seq_lengths,
                     seq_axis=None,
                     batch_axis=None,
                     name=None,
                     seq_dim=None,
                     batch_dim=None):
  """Reverses variable length slices.

  This op first slices `input` along the dimension `batch_axis`, and for
  each slice `i`, reverses the first `seq_lengths[i]` elements along the
  dimension `seq_axis`. `seq_lengths` must be a vector of length
  `input.dims[batch_axis]` whose elements obey
  `seq_lengths[i] <= input.dims[seq_axis]`.

  Example usage:

  >>> seq_lengths = [7, 2, 3, 5]
  >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],
  ...          [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]
  >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)
  >>> output
  <tf.Tensor: shape=(4, 8), dtype=int32, numpy=
  array([[0, 0, 5, 4, 3, 2, 1, 0],
         [2, 1, 0, 0, 0, 0, 0, 0],
         [3, 2, 1, 4, 0, 0, 0, 0],
         [5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>

  Args:
    input: A `Tensor`. The input to reverse.
    seq_lengths: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. 1-D with length `input.dims(batch_axis)` and
      `max(seq_lengths) <= input.dims(seq_axis)`
    seq_axis: An `int`. The dimension which is partially reversed.
    batch_axis: An optional `int`. Defaults to `0`. The dimension along which
      reversal is performed.
    name: A name for the operation (optional).
    seq_dim: Deprecated alias for `seq_axis`.
    batch_dim: Deprecated alias for `batch_axis`.

  Returns:
    A Tensor. Has the same type as input.
  """
  # Resolve each new-style argument against its deprecated alias (raises if
  # both are supplied with conflicting values).
  resolved_seq_axis = deprecation.deprecated_argument_lookup(
      "seq_axis", seq_axis, "seq_dim", seq_dim)
  resolved_batch_axis = deprecation.deprecated_argument_lookup(
      "batch_axis", batch_axis, "batch_dim", batch_dim)
  # The generated op still uses the legacy seq_dim/batch_dim names.
  return gen_array_ops.reverse_sequence(
      input=input,
      seq_lengths=seq_lengths,
      seq_dim=resolved_seq_axis,
      batch_dim=resolved_batch_axis,
      name=name)
@tf_export("reverse_sequence", v1=[])
@dispatch.add_dispatch_support
def reverse_sequence_v2(input,
                        seq_lengths,
                        seq_axis=None,
                        batch_axis=None,
                        name=None):
  """Reverses variable length slices.

  This op first slices `input` along the dimension `batch_axis`, and for
  each slice `i`, reverses the first `seq_lengths[i]` elements along the
  dimension `seq_axis`. `seq_lengths` must be a vector of length
  `input.dims[batch_axis]` whose elements obey
  `seq_lengths[i] <= input.dims[seq_axis]`.

  Example usage:

  >>> seq_lengths = [7, 2, 3, 5]
  >>> input = [[1, 2, 3, 4, 5, 0, 0, 0], [1, 2, 0, 0, 0, 0, 0, 0],
  ...          [1, 2, 3, 4, 0, 0, 0, 0], [1, 2, 3, 4, 5, 6, 7, 8]]
  >>> output = tf.reverse_sequence(input, seq_lengths, seq_axis=1, batch_axis=0)
  >>> output
  <tf.Tensor: shape=(4, 8), dtype=int32, numpy=
  array([[0, 0, 5, 4, 3, 2, 1, 0],
         [2, 1, 0, 0, 0, 0, 0, 0],
         [3, 2, 1, 4, 0, 0, 0, 0],
         [5, 4, 3, 2, 1, 6, 7, 8]], dtype=int32)>

  Args:
    input: A `Tensor`. The input to reverse.
    seq_lengths: A `Tensor`. Must be one of the following types: `int32`,
      `int64`. 1-D with length `input.dims(batch_axis)` and
      `max(seq_lengths) <= input.dims(seq_axis)`
    seq_axis: An `int`. The dimension which is partially reversed.
    batch_axis: An optional `int`. Defaults to `0`. The dimension along which
      reversal is performed.
    name: A name for the operation (optional).

  Returns:
    A Tensor. Has the same type as input.
  """
  # No deprecated aliases here; forward straight to the generated op, which
  # still names the axes seq_dim/batch_dim.
  return gen_array_ops.reverse_sequence(
      input=input,
      seq_lengths=seq_lengths,
      name=name,
      seq_dim=seq_axis,
      batch_dim=batch_axis)
# pylint: enable=redefined-builtin
@tf_export(v1=["gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             ("The `validate_indices` argument has no effect. "
                              "Indices are always validated on CPU and never "
                              "validated on GPU."),
                             ("validate_indices", None))
def gather(params,
           indices,
           validate_indices=None,
           name=None,
           axis=None,
           batch_dims=0):  # pylint: disable=g-doc-args
  r"""Gather slices from params axis `axis` according to indices.

  `tf.gather` extends scalar indexing (`params[3]`) to tensors of indices.
  In the simplest case it is identical to scalar indexing:

  >>> params = tf.constant(['p0', 'p1', 'p2', 'p3', 'p4', 'p5'])
  >>> tf.gather(params, 3).numpy()
  b'p3'

  The most common case is to pass a single axis tensor of indices (this
  can't be expressed as a python slice because the indices are not
  sequential):

  >>> indices = [2, 0, 2, 5]
  >>> tf.gather(params, indices).numpy()
  array([b'p2', b'p0', b'p2', b'p5'], dtype=object)

  The indices may have any shape, and `axis` (default 0) selects which axis
  of `params` is indexed. The output shape is the input shape with the
  indexed axis replaced by the shape of the indices:

  >>> def result_shape(p_shape, i_shape, axis=0):
  ...   return p_shape[:axis] + i_shape + p_shape[axis+1:]
  >>>
  >>> result_shape([1, 2, 3], [7, 5], axis=1)
  [1, 7, 5, 3]

  Each index takes a slice from `params` and places it at the corresponding
  location in the output.

  ### Batching:

  The `batch_dims` argument lets you gather different items from each
  element of a batch. Using `batch_dims=1` is equivalent to having an outer
  loop over the first axis of `params` and `indices`, gathering with
  `axis - 1` inside the loop:

  >>> params = tf.constant([
  ...     [0, 0, 1, 0, 2],
  ...     [3, 0, 0, 0, 4],
  ...     [0, 5, 0, 6, 0]])
  >>> indices = tf.constant([
  ...     [2, 4],
  ...     [0, 4],
  ...     [1, 3]])
  >>> tf.gather(params, indices, axis=1, batch_dims=1).numpy()
  array([[1, 2],
         [3, 4],
         [5, 6]], dtype=int32)

  Higher values of `batch_dims` behave like multiple nested loops over the
  outer axes of `params` and `indices`, so the overall shape function is

  >>> def batched_result_shape(p_shape, i_shape, axis=0, batch_dims=0):
  ...   return p_shape[:axis] + i_shape[batch_dims:] + p_shape[axis+1:]

  This comes up naturally if you need to use the indices of an operation
  like `tf.argsort` or `tf.math.top_k`, where the last dimension of the
  indices indexes into the last dimension of input at the corresponding
  location. In this case you can use `tf.gather(values, indices,
  batch_dims=-1)`.

  See also:

  * `tf.Tensor.__getitem__`: scalar and python-slice indexing
    (`tensor[..., 7, 1:-1]`)
  * `tf.gather_nd`: gathers across multiple axes at once (elements of a
    matrix instead of rows or columns)
  * `tf.boolean_mask`, `tf.where`: Binary indexing.
  * `tf.slice` and `tf.strided_slice`: lower level access to the
    implementation of `__getitem__`'s python-slice handling (`t[1:-1:2]`)

  Args:
    params: The `Tensor` from which to gather values. Must be at least rank
      `axis + 1`.
    indices: The index `Tensor`. Must be one of the following types: `int32`,
      `int64`. The values must be in range `[0, params.shape[axis])`.
    validate_indices: Deprecated, does nothing. Indices are always validated
      on CPU, never validated on GPU.

      Caution: On CPU, if an out of bound index is found, an error is raised.
      On GPU, if an out of bound index is found, a 0 is stored in the
      corresponding output value.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The `axis` in `params` to gather `indices` from. Must be greater than
      or equal to `batch_dims`. Defaults to the first non-batch dimension.
      Supports negative indexes.
    batch_dims: An `integer`. The number of batch dimensions. Must be less
      than or equal to `rank(indices)`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `params`.
  """
  del validate_indices  # Ignored; kept only for signature compatibility.

  if axis is None:
    # Default axis is the first non-batch dimension.
    axis = batch_dims
  if tensor_util.constant_value(axis) == 0:
    # Statically-known axis-0 gather.
    # TODO(apassos) find a less bad way of detecting resource variables
    # without introducing a circular dependency.
    try:
      # Resource variables gather without densifying via sparse_read.
      return params.sparse_read(indices, name=name)
    except AttributeError:
      return gen_array_ops.gather_v2(params, indices, axis, name=name)
  # Axis is non-zero or not statically known: use the general op.
  return gen_array_ops.gather_v2(
      params, indices, axis, batch_dims=batch_dims, name=name)
@tf_export("gather", v1=[])
@dispatch.add_dispatch_support
def gather_v2(params,
              indices,
              validate_indices=None,
              axis=None,
              batch_dims=0,
              name=None):
  # TF2 endpoint: identical to `gather` except that `name` comes last in the
  # signature. Documentation is shared via the __doc__ assignment below.
  return gather(
      params,
      indices,
      validate_indices=validate_indices,
      axis=axis,
      batch_dims=batch_dims,
      name=name)


gather_v2.__doc__ = gather.__doc__
@tf_export(v1=["batch_gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    "2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
    "with `batch_dims=-1` instead.")  # pylint: disable=missing-docstring
def batch_gather(params, indices, name=None):
  """Gather slices from params according to indices with leading batch dims."""
  with ops.name_scope(name, "BatchGather", [params, indices]):
    indices = ops.convert_to_tensor(indices, name="indices")
    params = ops.convert_to_tensor(params, name="params")
    ndims = indices.shape.ndims
    if ndims is None:
      # A static rank is required to derive batch_dims below.
      raise ValueError(
          "batch_gather does not allow indices with unknown shape.")
    # All but the last indices dimension are treated as batch dimensions.
    return _batch_gather(params, indices, batch_dims=ndims - 1)
def _batch_gather(params, indices, batch_dims, axis=None):
  r"""Gather slices from params according to indices with leading batch dims.

  This operation assumes that the leading `batch_dims` dimensions of `indices`
  and `params` are batch dimensions; and performs a `tf.gather` operation within
  each batch. (If `batch_dims` is not specified, then it defaults to
  `rank(indices)-1`.) In the case in which `batch_dims==0`, this operation
  is equivalent to `tf.gather`.

  Args:
    params: A Tensor. The tensor from which to gather values.
    indices: A Tensor. Must be one of the following types: int32, int64. Index
      tensor. Must be in range `[0, params.shape[batch_dims]]`.
    batch_dims: An integer or none. The number of batch dimensions. Must be
      less than `rank(indices)`. Defaults to `rank(indices) - 1` if None.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
      `axis` in `params` to gather `indices` from. Must be greater than or equal
      to `batch_dims`. Defaults to the first non-batch dimension. Supports
      negative indexes.

  Returns:
    A Tensor. Has the same type as `params`.

  Raises:
    TypeError: if `batch_dims` is not an int.
    ValueError: if `indices` has an unknown shape, or `batch_dims`/`axis` are
      out of range.
  """
  if batch_dims is not None and not isinstance(batch_dims, int):
    raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
  indices = ops.convert_to_tensor(indices, name="indices")
  params = ops.convert_to_tensor(params, name="params")
  indices_ndims = indices.shape.ndims
  if indices_ndims is None:
    raise ValueError("tf.gather does not allow indices with unknown "
                     "rank when batch_dims is specified.")
  if batch_dims is None:
    batch_dims = indices_ndims - 1
  if batch_dims < 0:
    batch_dims += indices_ndims
  if batch_dims < 0 or batch_dims >= indices_ndims:
    raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
                     (batch_dims, indices_ndims))
  if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
    raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
                     (batch_dims, params.shape.ndims))

  # Handle axis by transposing the axis dimension to be the first non-batch
  # dimension, recursively calling batch_gather with axis=0, and then
  # transposing the result to put the pre-axis dimensions before the indices
  # dimensions.
  if axis is not None and axis != batch_dims:
    # Adjust axis to be positive.
    if not isinstance(axis, int):
      # `axis` is a tensor whose sign is not statically known: normalize it
      # with a graph-level select. (Bug fix: this previously referenced the
      # undefined names `tf.where` and `array_ops.rank`; this module is
      # array_ops and does not import tensorflow, so that path raised
      # NameError. `where_v2` and `rank` are defined in this module.)
      axis = where_v2(axis < 0, axis + rank(params), axis)
    elif axis < 0 and params.shape.ndims is None:
      # Negative python-int axis but unknown static rank: resolve at runtime.
      axis = axis + rank(params)
    else:
      if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
        raise ValueError("axis (%d) out of range [%d, %d)" %
                         (axis, -params.shape.ndims, params.shape.ndims))
      if axis < 0:
        axis += params.shape.ndims
      if axis < batch_dims:
        raise ValueError("batch_dims = %d must be less than or equal to "
                         "axis = %d" % (batch_dims, axis))

    # Move params[axis] up to params[batch_dims].
    perm = [
        list(range(batch_dims)), [axis],
        gen_math_ops._range(batch_dims, axis, 1),
        gen_math_ops._range(axis + 1, rank(params), 1)
    ]
    params = transpose(params, concat(perm, axis=0))

    result = _batch_gather(params, indices, batch_dims=batch_dims)

    # Move the result dimensions corresponding to params[batch_dims:axis]
    # to just before the dimensions corresponding to indices[batch_dims:].
    params_start = indices_ndims + axis - batch_dims
    perm = [
        list(range(batch_dims)),
        gen_math_ops._range(indices_ndims, params_start, 1),
        list(range(batch_dims, indices_ndims)),
        gen_math_ops._range(params_start, rank(result), 1)
    ]
    return transpose(result, perm=concat(perm, axis=0))

  indices_shape = shape(indices)
  params_shape = shape(params)
  batch_indices = indices
  indices_dtype = indices.dtype.base_dtype
  accum_dim_value = ones((), dtype=indices_dtype)
  # Use correct type for offset index computation
  casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
  # Fold the batch coordinates into the indices so a single flat gather
  # serves all batch elements.
  for dim in range(batch_dims, 0, -1):
    dim_value = casted_params_shape[dim - 1]
    accum_dim_value *= casted_params_shape[dim]
    start = zeros((), dtype=indices_dtype)
    step = ones((), dtype=indices_dtype)
    dim_indices = gen_math_ops._range(start, dim_value, step)
    dim_indices *= accum_dim_value
    dim_shape = stack(
        [1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
    batch_indices += reshape(dim_indices, dim_shape)

  flat_indices = reshape(batch_indices, [-1])
  outer_shape = params_shape[batch_dims + 1:]
  flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
                                       False)

  flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
                                       axis=0))
  flat_result = gather(flat_params, flat_indices)
  result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
  # Recover as much static shape information as possible.
  final_shape = indices.get_shape()[:batch_dims].merge_with(
      params.get_shape()[:batch_dims])
  final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
  final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
  result.set_shape(final_shape)
  return result
@tf_export(v1=["gather_nd", "manip.gather_nd"])
@dispatch.add_dispatch_support
@deprecated_endpoints("manip.gather_nd")
def gather_nd(params, indices, name=None, batch_dims=0):
  r"""Gather slices from `params` into a Tensor with shape specified by `indices`.

  `indices` is an K-dimensional integer tensor, best thought of as a
  (K-1)-dimensional tensor of indices into `params`, where each element
  defines a slice of `params`:

      output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]

  Whereas in `tf.gather` `indices` defines slices into the first dimension of
  `params`, in `tf.gather_nd` `indices` defines slices into the first
  `N = indices.shape[-1]` dimensions of `params`. The last dimension of
  `indices` can be at most the rank of `params`, and corresponds to elements
  (if `indices.shape[-1] == params.rank`) or slices
  (if `indices.shape[-1] < params.rank`). The output tensor has shape

      indices.shape[:-1] + params.shape[indices.shape[-1]:]

  Additionally both 'params' and 'indices' can have M leading batch
  dimensions that exactly match. In this case 'batch_dims' must be M.

  Note that on CPU, if an out of bound index is found, an error is returned.
  On GPU, if an out of bound index is found, a 0 is stored in the
  corresponding output value.

  Some examples below.

  ```python
  # Simple indexing into a matrix:
  indices = [[0, 0], [1, 1]]
  params = [['a', 'b'], ['c', 'd']]
  output = ['a', 'd']

  # Slice indexing into a matrix:
  indices = [[1], [0]]
  params = [['a', 'b'], ['c', 'd']]
  output = [['c', 'd'], ['a', 'b']]

  # Indexing into a 3-tensor:
  indices = [[0, 0, 1], [1, 0, 1]]
  params = [[['a0', 'b0'], ['c0', 'd0']],
            [['a1', 'b1'], ['c1', 'd1']]]
  output = ['b0', 'b1']

  # Batched 'params' and 'indices':
  batch_dims = 1
  indices = [[1], [0]]
  params = [[['a0', 'b0'], ['c0', 'd0']],
            [['a1', 'b1'], ['c1', 'd1']]]
  output = [['c0', 'd0'], ['a1', 'b1']]
  ```

  See also `tf.gather`.

  Args:
    params: A `Tensor`. The tensor from which to gather values.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Index tensor.
    name: A name for the operation (optional).
    batch_dims: An integer or a scalar 'Tensor'. The number of batch
      dimensions.

  Returns:
    A `Tensor`. Has the same type as `params`.
  """
  static_batch_dims = tensor_util.constant_value(batch_dims)
  if static_batch_dims is not None:
    batch_dims = int(static_batch_dims)
  if batch_dims != 0:
    # Leading batch dimensions: delegate to the batched implementation.
    return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)
  # TODO(apassos) find a less bad way of detecting resource variables
  # without introducing a circular dependency.
  try:
    # Resource variables expose their own gather_nd.
    return params.gather_nd(indices, name=name)
  except AttributeError:
    return gen_array_ops.gather_nd(params, indices, name=name)
@tf_export("gather_nd", v1=[])
@dispatch.add_dispatch_support
def gather_nd_v2(params, indices, batch_dims=0, name=None):
  # TF2 endpoint: identical to `gather_nd` except for argument order in the
  # signature. Documentation is shared via the __doc__ assignment below.
  return gather_nd(params, indices, batch_dims=batch_dims, name=name)


gather_nd_v2.__doc__ = gather_nd.__doc__
def batch_gather_nd(params, indices, batch_dims, name=None):
  """gather_nd implementation with batch support.

  Prepends batch coordinates (built with `meshgrid`) to every index vector so
  that a single flat `gen_array_ops.gather_nd` call serves all batch elements,
  then reshapes the result back to its batched form.

  Args:
    params: A `Tensor`. The tensor from which to gather values.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Index tensor.
    batch_dims: An `int`. The number of leading batch dimensions shared by
      `params` and `indices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `params`.

  Raises:
    TypeError: If `batch_dims` is not an `int`.
    ValueError: If `batch_dims` is negative, or is not less than the
      statically-known rank of `indices` or of `params`.
  """
  with ops.name_scope(name, "BatchGatherND", [params, indices]):
    indices = ops.convert_to_tensor(indices, name="indices")
    params = ops.convert_to_tensor(params, name="params")

    if not isinstance(batch_dims, int):
      raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
    if batch_dims < 0:
      raise ValueError("tf.gather_nd does not allow negative batch_dims.")
    params_ndims = params.shape.ndims
    indices_ndims = indices.shape.ndims
    if indices_ndims is not None and batch_dims >= indices_ndims:
      raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
                       (batch_dims, indices_ndims))
    if params_ndims is not None and batch_dims >= params_ndims:
      raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
                       (batch_dims, params_ndims))

    expand = batch_dims == 0
    if expand:
      # Normally gather_nd will be called when batch_dims == 0.
      # But if this function is called with batch_dims = 0, e.g. for testing
      # purposes, this adds a dummy batch dimension to make batch_dims = 1.
      params = expand_dims(params, axis=0)
      indices = expand_dims(indices, axis=0)
      batch_dims = 1

    params_shape = shape(params)
    indices_shape = shape(indices)
    batch_shape = params_shape[:batch_dims]
    # Total number of batch elements (product of the batch dimensions).
    batch_size = gen_math_ops.prod(batch_shape, [0])
    index_internal_ndims = rank(indices) - batch_dims - 1
    indices_internal_shape = indices_shape[batch_dims:-1]

    # Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'
    # with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify
    # 'indices' s.t. it has shape [i1, ..., iK, D], where D <= M + N and slices
    # to the entire 'params' tensor.
    # Assuming we have a batch of shape [B1, B2], we use meshgrid to create a
    # grid of size B1 x B2.
    batch_dim_list = unstack(batch_shape, axis=0)
    dim_ranges = [
        gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)
        for x in batch_dim_list
    ]
    # With no batch dims (shouldn't happen after the expand above) the grid
    # degenerates to an empty list.
    mesh_list = meshgrid(*dim_ranges, indexing="ij") if dim_ranges else []
    # Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.
    flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]
    index_grid = transpose(stack(flat_list, axis=0))
    # We need to concatenate these batch coordinates with the internal indices.
    # concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]
    # So we reshape them both to [(B1.B2), i1, ..., iK, *]
    index_grid_shape = shape(index_grid)
    index_grid = reshape(
        index_grid,
        concat([
            index_grid_shape[:1],
            ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]
        ],
               axis=0))
    tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)
    index_grid = tile(index_grid, multiples=tile_shape)
    # index_grid now has shape [(B1.B2), i1, ..., iK, 2]
    flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)
    flat_indices = reshape(indices, shape=flat_shape)
    # flat_indices now has shape [(B1.B2), i1, ..., iK, C]
    indices = concat((index_grid, flat_indices), axis=-1)
    # indices has shape [(B1.B2), i1, ..., iK, 2+C]
    out = gen_array_ops.gather_nd(params, indices)
    # out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to
    # its original form.
    out_shape = shape(out)
    out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))
    if expand:
      # Drop the dummy batch dimension added above.
      out = squeeze(out, axis=0)
  return out
@deprecation.deprecated_endpoints("tensor_scatter_update")
@tf_export(
    "tensor_scatter_nd_update",
    v1=["tensor_scatter_nd_update", "tensor_scatter_update"])
@dispatch.add_dispatch_support
def tensor_scatter_nd_update(tensor, indices, updates, name=None):
  """Scatter `updates` into an existing tensor according to `indices`.

  This operation creates a new tensor by applying sparse `updates` to the
  input `tensor`. This is similar to an index assignment.

  ```
  # Not implemented: tensors cannot be updated inplace.
  tensor[indices] = updates
  ```

  If an out of bound index is found on CPU, an error is returned.

  > **WARNING**: There are some GPU specific semantics for this operation.
  >
  > - If an out of bound index is found, the index is ignored.
  > - The order in which updates are applied is nondeterministic, so the output
  >   will be nondeterministic if `indices` contains duplicates.

  This operation is very similar to `tf.scatter_nd`, except that the updates are
  scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
  for the existing tensor cannot be re-used, a copy is made and updated.

  In general:

  * `indices` is an integer tensor - the indices to update in `tensor`.
  * `indices` has **at least two** axes, the last axis is the depth of the
    index vectors.
  * For each index vector in `indices` there is a corresponding entry in
    `updates`.
  * If the length of the index vectors matches the rank of the `tensor`, then
    the index vectors each point to scalars in `tensor` and each update is a
    scalar.
  * If the length of the index vectors is less than the rank of `tensor`, then
    the index vectors each point to slices of `tensor` and shape of the updates
    must match that slice.

  Overall this leads to the following shape constraints:

  ```
  assert tf.rank(indices) >= 2
  index_depth = indices.shape[-1]
  batch_shape = indices.shape[:-1]
  assert index_depth <= tf.rank(tensor)
  outer_shape = tensor.shape[:index_depth]
  inner_shape = tensor.shape[index_depth:]
  assert updates.shape == batch_shape + inner_shape
  ```

  Typical usage is often much simpler than this general form, and it
  can be better understood starting with simple examples:

  ### Scalar updates

  The simplest usage inserts scalar elements into a tensor by index.
  In this case, the `index_depth` must equal the rank of the
  input `tensor`, since each column of `indices` is an index into an axis of
  the input `tensor`.

  In this simplest case the shape constraints are:

  ```
  num_updates, index_depth = indices.shape.as_list()
  assert updates.shape == [num_updates]
  assert index_depth == tf.rank(tensor)
  ```

  For example, to insert 4 scattered elements in a rank-1 tensor with
  8 elements.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%"
    src="https://www.tensorflow.org/images/ScatterNd1.png">
  </div>

  This scatter operation would look like this:

  >>> tensor = [0, 0, 0, 0, 0, 0, 0, 0]    # tf.rank(tensor) == 1
  >>> indices = [[1], [3], [4], [7]]       # num_updates == 4, index_depth == 1
  >>> updates = [9, 10, 11, 12]            # num_updates == 4
  >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))
  tf.Tensor([ 0  9  0 10 11  0  0 12], shape=(8,), dtype=int32)

  The length (first axis) of `updates` must equal the length of the `indices`:
  `num_updates`. This is the number of updates being inserted. Each scalar
  update is inserted into `tensor` at the indexed location.

  For a higher rank input `tensor` scalar updates can be inserted by using an
  `index_depth` that matches `tf.rank(tensor)`:

  >>> tensor = [[1, 1], [1, 1], [1, 1]]    # tf.rank(tensor) == 2
  >>> indices = [[0, 1], [2, 0]]           # num_updates == 2, index_depth == 2
  >>> updates = [5, 10]                    # num_updates == 2
  >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates))
  tf.Tensor(
      [[ 1  5]
       [ 1  1]
       [10  1]], shape=(3, 2), dtype=int32)

  ### Slice updates

  When the input `tensor` has more than one axis scatter can be used to update
  entire slices.

  In this case it's helpful to think of the input `tensor` as being a two level
  array-of-arrays. The shape of this two level array is split into the
  `outer_shape` and the `inner_shape`.

  `indices` indexes into the outer level of the input tensor (`outer_shape`).
  and replaces the sub-array at that location with the corresponding item from
  the `updates` list. The shape of each update is `inner_shape`.

  When updating a list of slices the shape constraints are:

  ```
  num_updates, index_depth = indices.shape.as_list()
  outer_shape = tensor.shape[:index_depth]
  inner_shape = tensor.shape[index_depth:]
  assert updates.shape == [num_updates, inner_shape]
  ```

  For example, to update rows of a `(6, 3)` `tensor`:

  >>> tensor = tf.zeros([6, 3], dtype=tf.int32)

  Use an index depth of one.

  >>> indices = tf.constant([[2], [4]])     # num_updates == 2, index_depth == 1
  >>> num_updates, index_depth = indices.shape.as_list()

  The `outer_shape` is `6`, the inner shape is `3`:

  >>> outer_shape = tensor.shape[:index_depth]
  >>> inner_shape = tensor.shape[index_depth:]

  2 rows are being indexed so 2 `updates` must be supplied.
  Each update must be shaped to match the `inner_shape`.

  >>> # num_updates == 2, inner_shape==3
  >>> updates = tf.constant([[1, 2, 3],
  ...                        [4, 5, 6]])

  Altogether this gives:

  >>> tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()
  array([[0, 0, 0],
         [0, 0, 0],
         [1, 2, 3],
         [0, 0, 0],
         [4, 5, 6],
         [0, 0, 0]], dtype=int32)

  #### More slice update examples

  A tensor representing a batch of uniformly sized video clips naturally has 5
  axes: `[batch_size, time, width, height, channels]`.

  For example:

  >>> batch_size, time, width, height, channels = 13,11,7,5,3
  >>> video_batch = tf.zeros([batch_size, time, width, height, channels])

  To replace a selection of video clips:
    * Use an `index_depth` of 1 (indexing the `outer_shape`: `[batch_size]`)
    * Provide updates each with a shape matching the `inner_shape`:
      `[time, width, height, channels]`.

  To replace the first two clips with ones:

  >>> indices = [[0],[1]]
  >>> new_clips = tf.ones([2, time, width, height, channels])
  >>> tf.tensor_scatter_nd_update(video_batch, indices, new_clips)

  To replace a selection of frames in the videos:

  * `indices` must have an `index_depth` of 2 for the `outer_shape`:
    `[batch_size, time]`.
  * `updates` must be shaped like a list of images.  Each update must have a
    shape, matching the `inner_shape`: `[width, height, channels]`.

  To replace the first frame of the first three video clips:

  >>> indices = [[0, 0], [1, 0], [2, 0]] # num_updates=3, index_depth=2
  >>> new_images = tf.ones([
  ...   # num_updates=3, inner_shape=(width, height, channels)
  ...   3, width, height, channels])
  >>> tf.tensor_scatter_nd_update(video_batch, indices, new_images)

  ### Folded indices

  In simple cases it's convenient to think of `indices` and `updates` as
  lists, but this is not a strict requirement. Instead of a flat `num_updates`,
  the `indices` and `updates` can be folded into a `batch_shape`. This
  `batch_shape` is all axes of the `indices`, except for the innermost
  `index_depth` axis.

  ```
  index_depth = indices.shape[-1]
  batch_shape = indices.shape[:-1]
  ```

  Note: The one exception is that the `batch_shape` cannot be `[]`. You can't
  update a single index by passing indices with shape `[index_depth]`.

  `updates` must have a matching `batch_shape` (the axes before `inner_shape`).

  ```
  assert updates.shape == batch_shape + inner_shape
  ```

  Note: The result is equivalent to flattening the `batch_shape` axes of
  `indices` and `updates`. This generalization just avoids the need
  for reshapes when it is more natural to construct "folded" indices and
  updates.

  With this generalization the full shape constraints are:

  ```
  assert tf.rank(indices) >= 2
  index_depth = indices.shape[-1]
  batch_shape = indices.shape[:-1]
  assert index_depth <= tf.rank(tensor)
  outer_shape = tensor.shape[:index_depth]
  inner_shape = tensor.shape[index_depth:]
  assert updates.shape == batch_shape + inner_shape
  ```

  For example, to draw an `X` on a `(5,5)` matrix start with these indices:

  >>> tensor = tf.zeros([5,5])
  >>> indices = tf.constant([
  ...  [[0,0],
  ...   [1,1],
  ...   [2,2],
  ...   [3,3],
  ...   [4,4]],
  ...  [[0,4],
  ...   [1,3],
  ...   [2,2],
  ...   [3,1],
  ...   [4,0]],
  ... ])
  >>> indices.shape.as_list()  # batch_shape == [2, 5], index_depth == 2
  [2, 5, 2]

  Here the `indices` do not have a shape of `[num_updates, index_depth]`, but a
  shape of `batch_shape+[index_depth]`.

  Since the `index_depth` is equal to the rank of `tensor`:

  * `outer_shape` is `(5,5)`
  * `inner_shape` is `()` - each update is scalar
  * `updates.shape` is `batch_shape + inner_shape == (5,2) + ()`

  >>> updates = [
  ...   [1,1,1,1,1],
  ...   [1,1,1,1,1],
  ... ]

  Putting this together gives:

  >>> tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()
  array([[1., 0., 0., 0., 1.],
         [0., 1., 0., 1., 0.],
         [0., 0., 1., 0., 0.],
         [0., 1., 0., 1., 0.],
         [1., 0., 0., 0., 1.]], dtype=float32)

  Args:
    tensor: Tensor to copy/update.
    indices: Indices to update.
    updates: Updates to apply at the indices.
    name: Optional name for the operation.

  Returns:
    A new tensor with the given shape and updates applied according to the
    indices.
  """
  return gen_array_ops.tensor_scatter_update(
      tensor=tensor, indices=indices, updates=updates, name=name)
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
# (And also now because of 'axis' processing).
@tf_export(v1=["quantize_v2"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    "2017-10-25",
    "`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
    "instead.")  # pylint: disable=missing-docstring
def quantize_v2(
    input,  # pylint: disable=redefined-builtin
    min_range,
    max_range,
    T,
    mode="MIN_COMBINED",
    name=None,
    round_mode="HALF_AWAY_FROM_ZERO",
    narrow_range=False,
    axis=None,
    ensure_minimum_range=0.01):
  # Normalize `axis`: None means "no axis" (-1); a negative axis is resolved
  # against the statically-known rank of the input.
  if axis is None:
    axis = -1
  elif axis < 0:
    if input.shape.ndims is None:
      raise ValueError("input should have known rank to use negative axis.")
    axis %= input.shape.ndims

  # Only forward `ensure_minimum_range` when it differs from the default, so
  # graphs built with the default keep the historical attribute set.
  op_kwargs = {
      "T": T,
      "mode": mode,
      "name": name,
      "round_mode": round_mode,
      "narrow_range": narrow_range,
      "axis": axis,
  }
  if ensure_minimum_range != 0.01:
    op_kwargs["ensure_minimum_range"] = ensure_minimum_range
  return gen_array_ops.quantize_v2(input, min_range, max_range, **op_kwargs)
quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""
# We want to expose tf.quantization.quantize instead of
# tf.quantization.quantize; we can deprecate tf.quantization.quantize in next
# version of TensorFlow.
@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("quantize")
def quantize(
    input,  # pylint: disable=redefined-builtin
    min_range,
    max_range,
    T,
    mode="MIN_COMBINED",
    round_mode="HALF_AWAY_FROM_ZERO",
    name=None,
    narrow_range=False,
    axis=None,
    ensure_minimum_range=0.01):
  """Quantize the input tensor."""
  # Delegate to the v1 implementation; omit `ensure_minimum_range` when it is
  # the default so older graph attribute sets are preserved.
  forwarded = {
      "mode": mode,
      "round_mode": round_mode,
      "name": name,
      "narrow_range": narrow_range,
      "axis": axis,
  }
  if ensure_minimum_range != 0.01:
    forwarded["ensure_minimum_range"] = ensure_minimum_range
  return quantize_v2(input, min_range, max_range, T, **forwarded)
@tf_export("quantization.dequantize", v1=["quantization.dequantize",
                                          "dequantize"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("dequantize")
def dequantize(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    min_range,
    max_range,
    mode="MIN_COMBINED",
    name=None,
    axis=None,
    narrow_range=False,
    dtype=dtypes.float32):
  # Resolve `axis`: None becomes -1; negative values require a known rank.
  if axis is None:
    axis = -1
  elif axis < 0:
    if input.shape.ndims is None:
      raise ValueError("input should have known rank to use negative axis.")
    axis %= input.shape.ndims

  # Pass `narrow_range`/`axis` only when they carry information, so default
  # calls produce the same op attributes as before those args existed.
  extra_kwargs = {}
  if axis >= 0 or narrow_range:
    extra_kwargs = {"narrow_range": narrow_range, "axis": axis}
  return gen_array_ops.dequantize(
      input, min_range, max_range, mode=mode, name=name, dtype=dtype,
      **extra_kwargs)
dequantize.__doc__ = gen_array_ops.dequantize.__doc__
@tf_export("quantization.quantize_and_dequantize")
@dispatch.add_dispatch_support
@deprecation.deprecated(None,
                        "This Op has been deprecated, use "
                        "`quantize_and_dequantize_v2` instead. To "
                        "simulate the V1 behavior of "
                        "tf.quantization.quantize_and_dequantize(...) use "
                        "tf.grad_pass_through("
                        "tf.quantization.quantize_and_dequantize_v2)(...).")
def quantize_and_dequantize(
    input,  # pylint: disable=redefined-builtin
    input_min,
    input_max,
    signed_input=True,
    num_bits=8,
    range_given=False,
    round_mode="HALF_TO_EVEN",
    name=None,
    narrow_range=False,
    axis=None):
  """Quantizes then dequantizes a tensor.

  Args:
    input: A `Tensor` to quantize and dequantize.
    input_min: If range_given=True, the minimum input value, that needs to be
      represented in the quantized representation. If axis is specified, this
      should be a vector of minimum values for each slice along axis.
    input_max: If range_given=True, the maximum input value that needs to be
      represented in the quantized representation. If axis is specified, this
      should be a vector of maximum values for each slice along axis.
    signed_input: True if the quantization is signed or unsigned.
    num_bits: The bitwidth of the quantization.
    range_given: If true use `input_min` and `input_max` for the range of the
      input, otherwise determine min and max from the input `Tensor`.
    round_mode: Rounding mode when rounding from float values to quantized ones.
      one of ['HALF_TO_EVEN', 'HALF_UP']
    name: Optional name for the operation.
    narrow_range: If true, then the absolute value of the quantized minimum
      value is the same as the quantized maximum value, instead of 1 greater.
      i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
    axis: Integer. If specified, refers to a dimension of the input tensor, such
      that quantization will be per slice along that dimension.

  Returns:
    A `Tensor`. Each element is the result of quantizing and dequantizing the
    corresponding element of `input`.
  """
  # Normalize `axis`: None means "no axis" (-1); resolving a negative axis
  # requires a statically-known input rank.
  if axis is None:
    axis = -1
  elif axis < 0:
    if input.shape.ndims is None:
      raise ValueError("input should have known rank to use negative axis.")
    axis %= input.shape.ndims

  return gen_array_ops.quantize_and_dequantize_v2(
      input,
      input_min=input_min,
      input_max=input_max,
      signed_input=signed_input,
      num_bits=num_bits,
      range_given=range_given,
      round_mode=round_mode,
      narrow_range=narrow_range,
      axis=axis,
      name=name)
@tf_export("quantization.quantize_and_dequantize_v2")
@dispatch.add_dispatch_support
def quantize_and_dequantize_v2(
    input,  # pylint: disable=redefined-builtin
    input_min,
    input_max,
    signed_input=True,
    num_bits=8,
    range_given=False,
    round_mode="HALF_TO_EVEN",
    name=None,
    narrow_range=False,
    axis=None):
  """Quantizes then dequantizes a tensor.

  Updates the gradient definition for quantization that is outside the range to
  be 0. To simulate the V1 behavior of
  tf.quantization.quantize_and_dequantize(...) use
  tf.grad_pass_through(tf.quantization.quantize_and_dequantize_v2)(...).

  Example usage:

  ```python
  def getQuantizeOp(input):
      input_tensor = tf.placeholder(tf.float32, shape=[4, 4])
      net = tf.quantization.quantize_and_dequantize(input,
                                                    input_min=min_threshold,
                                                    input_max=max_threshold,
                                                    range_given=True)

  To simulate v1 behavior:

  def testDecomposeQuantizeDequantize(self):
      def f(input_tensor):
        return tf.quantization.quantize_and_dequantize_v2(input_tensor,
                                                          input_min=5.0,
                                                          input_max=-10.0,
                                                          range_given=True)
      input_tensor = tf.placeholder(tf.float32, shape=[4, 4])
      net = tf.grad_pass_through(f)(input_tensor)
  ```

  Args:
    input: A `Tensor` to quantize and dequantize.
    input_min: If range_given=True, the minimum input value, that needs to be
      represented in the quantized representation. If axis is specified, this
      should be a vector of minimum values for each slice along axis.
    input_max: If range_given=True, the maximum input value that needs to be
      represented in the quantized representation. If axis is specified, this
      should be a vector of maximum values for each slice along axis.
    signed_input: True if the quantization is signed or unsigned.
    num_bits: The bitwidth of the quantization.
    range_given: If true use `input_min` and `input_max` for the range of the
      input, otherwise determine min and max from the input `Tensor`.
    round_mode: Rounding mode when rounding from float values to quantized ones.
      one of ['HALF_TO_EVEN', 'HALF_UP']
    name: Optional name for the operation.
    narrow_range: If true, then the absolute value of the quantized minimum
      value is the same as the quantized maximum value, instead of 1 greater.
      i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
    axis: Integer. If specified, refers to a dimension of the input tensor, such
      that quantization will be per slice along that dimension.

  Returns:
    A `Tensor`. Each element is the result of quantizing and dequantizing the
    corresponding element of `input`.
  """
  if axis is None:
    axis = -1
  elif axis < 0:
    if input.shape.ndims is None:
      raise ValueError("input should have known rank to use negative axis.")
    axis %= input.shape.ndims

  return gen_array_ops.quantize_and_dequantize_v4(
      input,
      input_min=input_min,
      input_max=input_max,
      signed_input=signed_input,
      num_bits=num_bits,
      range_given=range_given,
      round_mode=round_mode,
      narrow_range=narrow_range,
      axis=axis,
      name=name)
@tf_export("searchsorted")
@dispatch.add_dispatch_support
def searchsorted(sorted_sequence,
                 values,
                 side="left",
                 out_type=dtypes.int32,
                 name=None):
  """Searches for where a value would go in a sorted sequence.

  This is not a method for checking containment (like python `in`).

  The typical use case for this operation is "binning", "bucketing", or
  "discretizing": the `values` are assigned to bucket-indices based on the
  **edges** listed in `sorted_sequence`, and the bucket-index for each value
  is returned.

  >>> edges = [-1, 3.3, 9.1, 10.0]
  >>> values = [0.0, 4.1, 12.0]
  >>> tf.searchsorted(edges, values).numpy()
  array([1, 2, 4], dtype=int32)

  The `side` argument controls which index is returned if a value lands
  exactly on an edge:

  >>> seq = [0, 3, 9, 10, 10]
  >>> values = [0, 4, 10]
  >>> tf.searchsorted(seq, values).numpy()
  array([0, 2, 3], dtype=int32)
  >>> tf.searchsorted(seq, values, side="right").numpy()
  array([1, 2, 5], dtype=int32)

  The `axis` is not settable for this operation: it always operates on the
  innermost dimension (`axis=-1`), but accepts any number of outer dimensions.

  Note: This operation assumes that `sorted_sequence` **is sorted** along the
  innermost axis, maybe using `tf.sort(..., axis=-1)`. **If the sequence is
  not sorted no error is raised** and the content of the returned tensor is
  not well defined.

  Args:
    sorted_sequence: N-D `Tensor` containing a sorted sequence.
    values: N-D `Tensor` containing the search values.
    side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to
      upper_bound.
    out_type: The output type (`int32` or `int64`). Default is `tf.int32`.
    name: Optional name for the operation.

  Returns:
    An N-D `Tensor` the size of `values` containing the result of applying
    either lower_bound or upper_bound (depending on side) to each value. The
    result is not a global index to the entire `Tensor`, but the index in the
    last dimension.

  Raises:
    ValueError: If the last dimension of `sorted_sequence >= 2^31-1` elements.
      If the total size of `values` exceeds `2^31 - 1` elements.
      If the first `N-1` dimensions of the two tensors don't match.
  """
  # The kernels operate on rank-2 inputs, so flatten all outer dimensions of
  # both tensors, search, then restore the shape of `values`.
  last_seq_dim = shape_internal(sorted_sequence)[-1]
  last_val_dim = shape_internal(values)[-1]
  seq_2d = reshape(sorted_sequence, [-1, last_seq_dim])
  val_2d = reshape(values, [-1, last_val_dim])
  if side == "left":
    flat_result = gen_array_ops.lower_bound(seq_2d, val_2d, out_type, name)
  elif side == "right":
    flat_result = gen_array_ops.upper_bound(seq_2d, val_2d, out_type, name)
  else:
    raise ValueError("side must be either 'right' or 'left'.  Saw: %s." % side)
  return reshape(flat_result, shape_internal(values))
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
@tf_export("image.extract_patches")
@dispatch.add_dispatch_support
def extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):
  r"""Extract `patches` from `images`.

  This op collects patches from the input image, as if applying a
  convolution. All extracted patches are stacked in the depth (last) dimension
  of the output.

  Specifically, the op extracts patches of shape `sizes` which are `strides`
  apart in the input image. The output is subsampled using the `rates`
  argument, in the same manner as "atrous" or "dilated" convolutions.

  The result is a 4D tensor which is indexed by batch, row, and column.
  `output[i, x, y]` contains a flattened patch of size `sizes[1], sizes[2]`
  which is taken from the input starting at
  `images[i, x*strides[1], y*strides[2]]`. Each output patch can be reshaped
  to `sizes[1], sizes[2], depth`, where `depth` is `images.shape[3]`.

  The `padding` argument has no effect on the size of each patch, it
  determines how many patches are extracted. If `VALID`, only patches which
  are fully contained in the input image are included. If `SAME`, all patches
  whose starting point is inside the input are included, and areas outside the
  input default to zero.

  Example:

  ```
  n = 10
  # images is a 1 x 10 x 10 x 1 array that contains the numbers 1 through 100
  images = [[[[x * n + y + 1] for y in range(n)] for x in range(n)]]

  # 3x3 patches with stride length 5
  tf.image.extract_patches(images=images,
                           sizes=[1, 3, 3, 1],
                           strides=[1, 5, 5, 1],
                           rates=[1, 1, 1, 1],
                           padding='VALID')

  # Yields:
  [[[[ 1  2  3 11 12 13 21 22 23]
     [ 6  7  8 16 17 18 26 27 28]]
    [[51 52 53 61 62 63 71 72 73]
     [56 57 58 66 67 68 76 77 78]]]]
  ```

  Passing `rates=[1, 2, 2, 1]` instead samples every other pixel within each
  patch, as in a dilated (a.k.a. Atrous) convolution.

  Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
    sizes: The size of the extracted patches. Must be
      `[1, size_rows, size_cols, 1]`.
    strides: A 1-D Tensor of length 4. How far the centers of two consecutive
      patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
    rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.
      This is the input stride, specifying how far two consecutive patch
      samples are in the input. Equivalent to extracting patches with
      `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`,
      followed by subsampling them spatially by a factor of `rates`. This is
      equivalent to `rate` in dilated (a.k.a. Atrous) convolutions.
    padding: The type of padding algorithm to use.
    name: A name for the operation (optional).

  Returns:
    A 4-D Tensor of the same type as the input.
  """
  # Thin forwarding wrapper: the generated op uses `ksizes` for the patch
  # sizes argument.
  return gen_array_ops.extract_image_patches(
      images, sizes, strides, rates, padding, name)
@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "ksizes is deprecated, use sizes instead",
                             "ksizes")
def extract_image_patches(  # pylint: disable=missing-docstring
    images,
    ksizes=None,
    strides=None,
    rates=None,
    padding=None,
    name=None,
    sizes=None):
  """Extract patches from images and put them in the "depth" output dimension.

  V1 compatibility wrapper: accepts the deprecated `ksizes` name for the
  patch-size argument in addition to `sizes`, then forwards to the generated
  op. The docstring visible to users is replaced below with the generated
  op's documentation.

  Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
    ksizes: Deprecated alias for `sizes`.
    strides: 1-D of length 4. How far the centers of two consecutive patches
      are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
    rates: 1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. Input
      stride specifying how far two consecutive patch samples are in the
      input; equivalent to `rate` in dilated (a.k.a. Atrous) convolutions.
    padding: A `string` from: "SAME", "VALID". The type of padding algorithm
      to use.
    name: A name for the operation (optional).
    sizes: The size of the extracted patches; `[1, size_rows, size_cols, 1]`.

  Returns:
    A Tensor. Has the same type as images.
  """
  # Prefer `sizes` when given; fall back to the deprecated `ksizes`.
  patch_sizes = deprecation.deprecated_argument_lookup("sizes", sizes,
                                                       "ksizes", ksizes)
  return gen_array_ops.extract_image_patches(images, patch_sizes, strides,
                                             rates, padding, name)
extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
@tf_export("fingerprint")
@dispatch.add_dispatch_support
def fingerprint(data, method="farmhash64", name=None):
  r"""Generates fingerprint values.

  Generates fingerprint values of `data`. The first dimension of `data` is
  treated as the batch dimension: `output[i]` contains the fingerprint value
  generated from contents in `data[i, ...]` for all `i`.

  Fingerprint values are written out as byte arrays. For example, the default
  method `farmhash64` generates a 64-bit fingerprint value at a time; this
  8-byte value is written out as a `tf.uint8` array of size 8, in
  little-endian order. So for `data` of dtype `tf.int32` and shape (2, 3, 4),
  the output shape is (2, 8): `output[0, :]` is generated from the 12 integers
  in `data[0, :, :]` and `output[1, :]` from the other 12 integers in
  `data[1, :, :]`.

  Note that this op fingerprints the raw underlying buffer, and it does not
  fingerprint Tensor's metadata such as data type and/or shape. For example,
  the fingerprint values are invariant under reshapes and bitcasts as long as
  the batch dimension remain the same:

  ```python
  tf.fingerprint(data) == tf.fingerprint(tf.reshape(data, ...))
  tf.fingerprint(data) == tf.fingerprint(tf.bitcast(data, ...))
  ```

  For string data, one should expect `tf.fingerprint(data) !=
  tf.fingerprint(tf.string.reduce_join(data))` in general.

  Args:
    data: A `Tensor`. Must have rank 1 or higher.
    method: A `Tensor` of type `tf.string`. Fingerprint method used by this
      op. Currently available method is `farmhash64`.
    name: A name for the operation (optional).

  Returns:
    A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals
    to `data`'s first dimension, and the second dimension size depends on the
    fingerprint algorithm.
  """
  return gen_array_ops.fingerprint(data, method, name)
def convert_to_int_tensor(tensor, name, dtype=dtypes.int32):
  """Converts the given value to an integer Tensor.

  Args:
    tensor: A value convertible to a Tensor.
    name: Name to use for the converted tensor (also used in error messages).
    dtype: The integer dtype to cast to. Defaults to `tf.int32`.

  Returns:
    `tensor` converted and cast to `dtype`.

  Raises:
    TypeError: If `tensor` does not convert to an integer-typed Tensor.
  """
  result = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)
  # Guard clause: reject anything that didn't resolve to an integer dtype.
  if not result.dtype.is_integer:
    raise TypeError("%s must be an integer tensor; dtype=%s" %
                    (name, result.dtype))
  return gen_math_ops.cast(result, dtype)
def get_positive_axis(axis, ndims, axis_name="axis", ndims_name="ndims"):
  """Validate an `axis` parameter, and normalize it to be positive.

  If `ndims` is known (i.e., not `None`), then check that `axis` is in the
  range `-ndims <= axis < ndims`, and return `axis` (if `axis >= 0`) or
  `axis + ndims` (otherwise).
  If `ndims` is not known, and `axis` is positive, then return it as-is.
  If `ndims` is not known, and `axis` is negative, then report an error.

  Args:
    axis: An integer constant
    ndims: An integer constant, or `None`
    axis_name: The name of `axis` (for error messages).
    ndims_name: The name of `ndims` (for error messages).

  Returns:
    The normalized `axis` value.

  Raises:
    ValueError: If `axis` is out-of-bounds, or if `axis` is negative and
      `ndims is None`.
  """
  if not isinstance(axis, int):
    raise TypeError("%s must be an int; got %s" %
                    (axis_name, type(axis).__name__))
  if ndims is None:
    # Without a known rank, a negative axis cannot be resolved.
    if axis < 0:
      raise ValueError("%s may only be negative if %s is statically known." %
                       (axis_name, ndims_name))
    return axis
  if not -ndims <= axis < ndims:
    raise ValueError("%s=%s out of bounds: expected %s<=%s<%s" %
                     (axis_name, axis, -ndims, axis_name, ndims))
  return axis + ndims if axis < 0 else axis
# This op is intended to exactly match the semantics of numpy.repeat, with
# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior
# when axis is not specified. Rather than implement that special behavior, we
# simply make `axis` be a required argument.
#
# External (OSS) `tf.repeat` feature request:
# https://github.com/tensorflow/tensorflow/issues/8246
def repeat_with_axis(data, repeats, axis, name=None):
  """Repeats elements of `data`.

  Args:
    data: An `N`-dimensional tensor.
    repeats: A 1-D integer tensor specifying how many times each element in
      `axis` should be repeated. `len(repeats)` must equal `data.shape[axis]`.
      Supports broadcasting from a scalar value.
    axis: `int`. The axis along which to repeat values. Must be less than
      `max(N, 1)`.
    name: A name for the operation.

  Returns:
    A tensor with `max(N, 1)` dimensions. Has the same shape as `data`,
    except that dimension `axis` has size `sum(repeats)`.

  Raises:
    TypeError: If `axis` is not a Python int.

  Example usage:

  >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
  <tf.Tensor: shape=(5,), dtype=string,
  numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
  >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=0)
  <tf.Tensor: shape=(5, 2), dtype=int32, numpy=
  array([[1, 2],
         [1, 2],
         [3, 4],
         [3, 4],
         [3, 4]], dtype=int32)>
  >>> repeat([[1, 2], [3, 4]], repeats=[2, 3], axis=1)
  <tf.Tensor: shape=(2, 5), dtype=int32, numpy=
  array([[1, 1, 2, 2, 2],
         [3, 3, 4, 4, 4]], dtype=int32)>
  """
  if not isinstance(axis, int):
    raise TypeError("axis must be an int; got %s" % type(axis).__name__)
  with ops.name_scope(name, "Repeat", [data, repeats]):
    data = ops.convert_to_tensor(data, name="data")
    repeats = convert_to_int_tensor(repeats, name="repeats")
    # `repeats` must be a scalar or a vector; higher ranks are rejected here.
    repeats.shape.with_rank_at_most(1)
    # If `data` is a scalar, then upgrade it to a vector.
    data = _with_nonzero_rank(data)
    data_shape = shape(data)
    # If `axis` is negative, then convert it to a positive value.
    axis = get_positive_axis(axis, data.shape.rank, ndims_name="rank(data)")
    # If we know that `repeats` is a scalar, then we can just tile & reshape.
    if repeats.shape.num_elements() == 1:
      repeats = reshape(repeats, [])
      expanded = expand_dims(data, axis + 1)
      tiled = tile_one_dimension(expanded, axis + 1, repeats)
      result_shape = concat([
          data_shape[:axis], [repeats * data_shape[axis]], data_shape[axis + 1:]
      ],
                            axis=0)
      return reshape(tiled, result_shape)
    # Check data Tensor shapes.
    if repeats.shape.ndims == 1:
      data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])
    repeats = broadcast_to(repeats, [data_shape[axis]])
    # Keep the pre-broadcast per-element counts for the final size computation.
    repeats_original = repeats
    # Broadcast the `repeats` tensor so rank(repeats) == axis + 1.
    if repeats.shape.ndims != axis + 1:
      repeats_shape = shape(repeats)
      repeats_ndims = rank(repeats)
      broadcast_shape = concat(
          [data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)
      repeats = broadcast_to(repeats, broadcast_shape)
      repeats.set_shape([None] * (axis + 1))
    # Create a "sequence mask" based on `repeats`, where slices across `axis`
    # contain one `True` value for each repetition.  E.g., if
    # `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.
    max_repeat = gen_math_ops.maximum(
        0, gen_math_ops._max(repeats, _all_dimensions(repeats)))
    mask = sequence_mask(repeats, max_repeat)
    # Add a new dimension around each value that needs to be repeated, and
    # then tile that new dimension to match the maximum number of repetitions.
    expanded = expand_dims(data, axis + 1)
    tiled = tile_one_dimension(expanded, axis + 1, max_repeat)
    # Use `boolean_mask` to discard the extra repeated values.  This also
    # flattens all dimensions up through `axis`.
    masked = boolean_mask(tiled, mask)
    # Reshape the output tensor to add the outer dimensions back.
    if axis == 0:
      result = masked
    else:
      # Total output size along `axis` is the sum of all repeat counts.
      repeated_dim_size = gen_math_ops._sum(
          repeats_original,
          axis=gen_math_ops._range(0, rank(repeats_original), 1))
      result_shape = concat(
          [data_shape[:axis], [repeated_dim_size], data_shape[axis + 1:]],
          axis=0)
      result = reshape(masked, result_shape)
    # Preserve shape information.
    if data.shape.ndims is not None:
      new_axis_size = 0 if repeats.shape[0] == 0 else None
      result.set_shape(data.shape[:axis].concatenate(
          [new_axis_size]).concatenate(data.shape[axis + 1:]))
    return result
def tile_one_dimension(data, axis, multiple):
  """Repeats `data` along the single dimension `axis`, `multiple` times.

  `axis` is assumed to be a nonnegative int.
  """
  ndims = data.shape.ndims
  if ndims is None:
    # Rank unknown at graph-build time: assemble the multiples vector
    # dynamically from a vector of ones with `multiple` spliced in at `axis`.
    dynamic_ones = ones(rank(data), dtypes.int32)
    multiples = concat(
        [dynamic_ones[:axis], [multiple], dynamic_ones[axis + 1:]], axis=0)
  else:
    # Static rank: a plain Python list with `multiple` at position `axis`.
    multiples = [1] * axis + [multiple] + [1] * (ndims - axis - 1)
  return tile(data, multiples)
def _with_nonzero_rank(data):
  """If `data` is scalar, then add a dimension; otherwise return as-is."""
  if data.shape.ndims is not None:
    if data.shape.ndims == 0:
      return stack([data])
    else:
      return data
  else:
    data_shape = shape(data)
    data_ndims = rank(data)
    # Rank unknown statically: reshape via a shape built by prepending 1 to
    # the dynamic shape and keeping only the last `rank(data)` entries.  For
    # a scalar (`data_ndims == 0`) the `[-0:]` slice keeps everything, i.e.
    # shape [1]; otherwise the slice recovers the original shape unchanged.
    return reshape(data, concat([[1], data_shape], axis=0)[-data_ndims:])
@tf_export("repeat")
@dispatch.add_dispatch_support
def repeat(input, repeats, axis=None, name=None):  # pylint: disable=redefined-builtin
  """Repeat elements of `input`.

  See also `tf.concat`, `tf.stack`, `tf.tile`.

  Args:
    input: An `N`-dimensional Tensor.
    repeats: An 1-D `int` Tensor. The number of repetitions for each element.
      repeats is broadcasted to fit the shape of the given axis. `len(repeats)`
      must equal `input.shape[axis]` if axis is not None.
    axis: An int. The axis along which to repeat values. By default (axis=None),
      use the flattened input array, and return a flat output array.
    name: A name for the operation.

  Returns:
    A Tensor which has the same shape as `input`, except along the given axis.
    If axis is None then the output array is flattened to match the flattened
    input array.

  Example usage:

  >>> repeat(['a', 'b', 'c'], repeats=[3, 0, 2], axis=0)
  <tf.Tensor: shape=(5,), dtype=string,
  numpy=array([b'a', b'a', b'a', b'c', b'c'], dtype=object)>
  """
  if axis is not None:
    return repeat_with_axis(input, repeats, axis, name)
  # No axis given: repeat over (and return) the flattened input.
  return repeat_with_axis(reshape(input, [-1]), repeats, 0, name)
@tf_export("guarantee_const")
@deprecation.deprecated(None, "Not for public use.")
def guarantee_const(input, name=None):  # pylint: disable=redefined-builtin
  """Promise to the TF runtime that the input tensor is a constant.

  The runtime is then free to make optimizations based on this.  The value
  comes back unmodified.

  Args:
    input: A `Tensor`.
    name: A name for this operation.

  Returns:
    A `Tensor`. Has the same dtype as `input`.
  """
  result = gen_array_ops.guarantee_const(input=input, name=name)
  return result
# Register elementwise ops that don't have Python wrappers so they still
# participate in unary-elementwise dispatch.
dispatch.register_unary_elementwise_api(gen_array_ops.check_numerics)
| 35.514273 | 129 | 0.633125 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse
from tensorflow.python.types import core
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
# `tf.newaxis` is just an alias for None, used as the insert-a-dimension
# marker in `__getitem__`-style slicing (see `_slice_helper` below).
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# Capture the builtin `slice` before this module shadows it with its own
# existing 'slice' for later use in this module.
_BaseSlice = slice
@tf_export("reshape", v1=["reshape", "manip.reshape"])
@dispatch.add_dispatch_support
def reshape(tensor, shape, name=None):  # pylint: disable=redefined-outer-name
  """Reshapes `tensor` to `shape`, keeping static shape info when available."""
  result = gen_array_ops.reshape(tensor, shape, name)
  # If `shape` is statically known, attach it to `result` for shape inference.
  tensor_util.maybe_set_static_shape(result, shape)
  return result
@tf_export("fill")
@dispatch.add_dispatch_support
def fill(dims, value, name=None):
  """Creates a tensor of shape `dims` filled with the scalar `value`."""
  result = gen_array_ops.fill(dims, value, name=name)
  # If `dims` is statically known, attach it to `result` for shape inference.
  tensor_util.maybe_set_static_shape(result, dims)
  return result
@tf_export("identity")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def identity(input, name=None):  # pylint: disable=redefined-builtin
  """Returns a Tensor (or composite tensor) with the same content as `input`."""
  if isinstance(input, composite_tensor.CompositeTensor):
    # Apply identity to every component tensor of the composite.
    return nest.map_structure(identity, input, expand_composites=True)
  if context.executing_eagerly() and not hasattr(input, "graph"):
    # Make sure we get an input with handle data attached from resource
    # variables. Variables have correct handle data when graph building.
    input = ops.convert_to_tensor(input)
  ret = gen_array_ops.identity(input, name=name)
  # Propagate handle data for happier shape inference for resource variables.
  if hasattr(input, "_handle_data"):
    ret._handle_data = input._handle_data  # pylint: disable=protected-access
  return ret
# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
  """V1 endpoint: inserts a length-1 dimension into `input` at index `axis`.

  `dim` is a deprecated alias for `axis`; exactly one of them must be given.
  """
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  if axis is None:
    raise ValueError("Must specify an axis argument to tf.expand_dims()")
  return expand_dims_v2(input, axis, name)
@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
  """Inserts a length-1 dimension into `input`'s shape at index `axis`."""
  return gen_array_ops.expand_dims(input=input, axis=axis, name=name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated("2016-11-30",
                        "This op will be removed after the deprecation date. "
                        "Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
  # Deprecated thin wrapper around the generated ListDiff op.
  return gen_array_ops.list_diff(x, y, out_idx, name)
# Prepend the generated op's docstring to the (decorator-supplied) one.
listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable
@deprecation.deprecated("2018-11-30",
                        "This op will be removed after the deprecation date. "
                        "Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
@dispatch.add_dispatch_support
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
  # Deprecated wrapper; its public docstring is copied from the op below.
  return gen_array_ops.list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
@tf_export("broadcast_dynamic_shape")
@dispatch.add_dispatch_support
def broadcast_dynamic_shape(shape_x, shape_y):
  """Computes the broadcast of two shape tensors at runtime."""
  broadcast = gen_array_ops.broadcast_args(shape_x, shape_y)
  return broadcast
@tf_export("broadcast_static_shape")
@dispatch.add_dispatch_support
def broadcast_static_shape(shape_x, shape_y):
  """Computes the broadcast of two static `TensorShape`s."""
  broadcast = common_shapes.broadcast_shape(shape_x, shape_y)
  return broadcast
@tf_export("shape", v1=[])
@dispatch.add_dispatch_support
def shape_v2(input, out_type=dtypes.int32, name=None):
  """Returns the shape of `input` as a tensor of type `out_type` (V2 API)."""
  # pylint: disable=redefined-builtin
  return shape(input, name, out_type)
@tf_export(v1=["shape"])
@dispatch.add_dispatch_support
def shape(input, name=None, out_type=dtypes.int32):
  """Returns the shape of `input` as a tensor of type `out_type` (V1 API)."""
  # pylint: disable=redefined-builtin
  return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  """Shared implementation of `shape`.

  Args:
    input: A `Tensor` or `SparseTensor`(Value).
    name: Optional op name.
    optimize: If True, fold to a constant when the static shape is fully
      defined instead of emitting a Shape op.
    out_type: The dtype of the returned shape tensor.

  Returns:
    A tensor of type `out_type` holding the shape of `input`.
  """
  # pylint: disable=redefined-builtin
  with ops.name_scope(name, "Shape", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      # Sparse tensors carry their shape explicitly as `dense_shape`.
      return gen_math_ops.cast(input.dense_shape, out_type)
    else:
      if not context.executing_eagerly():
        input = ops.convert_to_tensor(input)
        input_shape = input.get_shape()
        if optimize and input_shape.is_fully_defined():
          # Fully known static shape: return it as a constant.
          return constant(input_shape.as_list(), out_type, name=name)
      return gen_array_ops.shape(input, name=name, out_type=out_type)
@tf_export("shape_n")
@dispatch.add_dispatch_support
def shape_n(input, out_type=dtypes.int32, name=None):
  """Returns the shapes of a list of tensors as a list of shape tensors."""
  # pylint: disable=redefined-builtin
  return gen_array_ops.shape_n(input, out_type=out_type, name=name)
@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
  """Returns the number of elements in `input` as a scalar (V2 API)."""
  # pylint: disable=redefined-builtin
  return size(input, name, out_type)
@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
  """Returns the number of elements in `input` as a scalar (V1 API)."""
  # pylint: disable=redefined-builtin
  return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
  """Shared implementation of `size`.

  Args:
    input: A `Tensor` or `SparseTensor`(Value).
    name: Optional op name.
    optimize: If True, return a constant when the element count is statically
      known (fully-defined shape, or any dimension equal to 0).
    out_type: The dtype of the returned scalar.

  Returns:
    A scalar of type `out_type`: the number of elements of `input`.
  """
  # pylint: disable=redefined-builtin,protected-access
  if (context.executing_eagerly() and not hasattr(input, "graph") and
      not isinstance(
          input,
          (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue))):
    # Eager fast path: compute the element count with NumPy.
    input = ops.convert_to_tensor(input)
    np_out_type = out_type.as_numpy_dtype
    num_elements = np.prod(input._shape_tuple(), dtype=np_out_type)  # pylint: disable=protected-access
    return ops.convert_to_tensor(num_elements, dtype=out_type)
  with ops.name_scope(name, "Size", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      # Sparse: size is the product of the dense shape.
      return gen_math_ops.prod(
          gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
    else:
      input = ops.convert_to_tensor(input)
      input_shape = input.get_shape()
      if optimize:
        if input_shape.is_fully_defined():
          return constant(input_shape.num_elements(), out_type, name=name)
        # Any zero-sized dimension makes the element count 0.
        if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
          return constant(0, out_type, name=name)
      return gen_array_ops.size(input, name=name, out_type=out_type)
@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
  """Returns the rank (number of dimensions) of `input` as a scalar."""
  # pylint: disable=redefined-builtin
  return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
  """Shared implementation of `rank`.

  When `optimize` is True and the rank is statically known, a constant is
  returned instead of a Rank op.
  """
  # pylint: disable=redefined-builtin
  with ops.name_scope(name, "Rank", [input]) as name:
    if isinstance(
        input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      # Sparse: the rank is the length of the dense shape vector.
      return gen_array_ops.size(input.dense_shape, name=name)
    else:
      input = ops.convert_to_tensor(input)
      input_shape = input.get_shape()
      if optimize and input_shape.ndims is not None:
        return constant(input_shape.ndims, dtypes.int32, name=name)
      return gen_array_ops.rank(input, name=name)
# Error prefix raised by `_check_index` for unsupported `__getitem__` indices.
_SLICE_TYPE_ERROR = (
    "Only integers, slices (`:`), ellipsis (`...`), "
    "tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
    "indices")
# Tensor dtypes accepted as slice begin/end/stride components.
_SUPPORTED_SLICE_DTYPES = (dtypes.int32, dtypes.int32_ref, dtypes.int64,
                           dtypes.int64_ref)
def _check_index(idx):
  """Raises TypeError unless `idx` is a valid scalar index for slicing."""
  if isinstance(idx, (numbers.Integral, tensor_shape.Dimension)):
    return
  # Optimistic check. Assumptions:
  # * any object with a dtype is supported
  # * any object with a dtype has a sizeable shape attribute.
  dtype = getattr(idx, "dtype", None)
  if (dtype is None or dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or
      idx.shape and len(idx.shape) == 1):
    # TODO(slebedev): IndexError seems more appropriate here, but it
    # will break `_slice_helper` contract.
    raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
def _is_undefined_dimension(d):
  """True iff `d` is a `tensor_shape.Dimension` with an unknown value."""
  if not isinstance(d, tensor_shape.Dimension):
    return False
  return d.value is None
@tf_export("__operators__.getitem", v1=[])
@dispatch.add_dispatch_support
def _slice_helper(tensor, slice_spec, var=None):
  """Implements `Tensor.__getitem__`: lowers `slice_spec` to a strided slice.

  Args:
    tensor: The tensor being sliced.
    slice_spec: An int / `slice` / `Ellipsis` / `tf.newaxis`, a boolean mask,
      or a list/tuple of those.
    var: Optional variable, threaded through to `strided_slice` so sliced
      assignment can be attached to the result.

  Returns:
    The sliced tensor.
  """
  tensor = ops.convert_to_tensor(tensor)
  # TODO(wangpeng): Consider supporting var
  if var is None and ops._numpy_style_slicing:  # pylint: disable=protected-access
    return tensor._numpy_style_getitem(slice_spec)  # pylint: disable=protected-access
  # A boolean spec (Python bool, bool Tensor, or bool ndarray) is a mask.
  if isinstance(slice_spec, bool) or \
  (isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
  (isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
    return boolean_mask(tensor=tensor, mask=slice_spec)
  if not isinstance(slice_spec, (list, tuple)):
    slice_spec = [slice_spec]
  begin, end, strides = [], [], []
  index = 0
  # Bitmasks for the StridedSlice op; bit `index` describes spec position
  # `index`.
  new_axis_mask, shrink_axis_mask = 0, 0
  begin_mask, end_mask = 0, 0
  ellipsis_mask = 0
  for s in slice_spec:
    if isinstance(s, _BaseSlice):
      # A Python slice: use start/stop/step where given; mark missing ends
      # as "open" via begin_mask/end_mask.
      if s.start is not None and not _is_undefined_dimension(s.start):
        _check_index(s.start)
        begin.append(s.start)
      else:
        begin.append(0)
        begin_mask |= (1 << index)
      if s.stop is not None and not _is_undefined_dimension(s.stop):
        _check_index(s.stop)
        end.append(s.stop)
      else:
        end.append(0)
        end_mask |= (1 << index)
      if s.step is not None and not _is_undefined_dimension(s.step):
        _check_index(s.step)
        strides.append(s.step)
      else:
        strides.append(1)
    elif s is Ellipsis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      ellipsis_mask |= (1 << index)
    elif s is newaxis:
      begin.append(0)
      end.append(0)
      strides.append(1)
      new_axis_mask |= (1 << index)
    else:
      # A scalar index: select one element and drop that dimension.
      _check_index(s)
      begin.append(s)
      end.append(s + 1)
      strides.append(1)
      shrink_axis_mask |= (1 << index)
    index += 1
  # stack possibly involves no tensors, so we must use op_scope correct graph.
  with ops.name_scope(
      None,
      "strided_slice", [tensor] + begin + end + strides,
      skip_on_eager=False) as name:
    if begin:
      packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
                                                  stack(strides))
      # If any of begin/end/strides is int64, promote all three to int64.
      if (packed_begin.dtype == dtypes.int64 or
          packed_end.dtype == dtypes.int64 or
          packed_strides.dtype == dtypes.int64):
        if packed_begin.dtype != dtypes.int64:
          packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
        if packed_end.dtype != dtypes.int64:
          packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
        if packed_strides.dtype != dtypes.int64:
          packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
    else:
      # Empty spec: everything is expressed through the masks.
      var_empty = constant([], dtype=dtypes.int32)
      packed_begin = packed_end = packed_strides = var_empty
    return strided_slice(
        tensor,
        packed_begin,
        packed_end,
        packed_strides,
        begin_mask=begin_mask,
        end_mask=end_mask,
        shrink_axis_mask=shrink_axis_mask,
        new_axis_mask=new_axis_mask,
        ellipsis_mask=ellipsis_mask,
        var=var,
        name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
@dispatch.add_dispatch_support
def slice(input_, begin, size, name=None):
  """Extracts a slice of size `size` from `input_` starting at `begin`."""
  # pylint: disable=redefined-builtin
  return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
@tf_export("strided_slice")
@dispatch.add_dispatch_support
def strided_slice(input_,
                  begin,
                  end,
                  strides=None,
                  begin_mask=0,
                  end_mask=0,
                  ellipsis_mask=0,
                  new_axis_mask=0,
                  shrink_axis_mask=0,
                  var=None,
                  name=None):
  """Extracts a strided slice of a tensor (generalized Python indexing).

  Wraps the generated StridedSlice op.  When `var` is provided, an `assign`
  method is attached to the returned op so a sliced assignment into `var`
  can be built with the same slice parameters.
  """
  if strides is None:
    # Default: stride 1 in every sliced dimension.
    strides = ones_like(begin)
  op = gen_array_ops.strided_slice(
      input=input_,
      begin=begin,
      end=end,
      strides=strides,
      name=name,
      begin_mask=begin_mask,
      end_mask=end_mask,
      ellipsis_mask=ellipsis_mask,
      new_axis_mask=new_axis_mask,
      shrink_axis_mask=shrink_axis_mask)
  parent_name = name
  if var is not None:
    def assign(val, name=None):
      """Closure that assigns `val` into the same slice of `var`."""
      if name is None:
        name = parent_name + "_assign"
      return var._strided_slice_assign(
          begin=begin,
          end=end,
          strides=strides,
          value=val,
          name=name,
          begin_mask=begin_mask,
          end_mask=end_mask,
          ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask,
          shrink_axis_mask=shrink_axis_mask)
    op.assign = assign
  return op
def _SliceHelperVar(var, slice_spec):
  """`__getitem__` for variables: slices `var.value()`, passing `var` through
  so sliced assignment can be supported."""
  return _slice_helper(var.value(), slice_spec, var)
# Install `_slice_helper` as `Tensor.__getitem__`.
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
@dispatch.add_dispatch_support
def parallel_stack(values, name="parallel_stack"):
  """Stacks rank-`R` tensors into one rank-`(R+1)` tensor, in parallel.

  Graph-mode only (raises RuntimeError in eager execution).
  """
  if context.executing_eagerly():
    raise RuntimeError("tf.parallel_stack() is not compatible with "
                       "eager execution.")
  with ops.name_scope(name):
    value_t = ops.convert_to_tensor(values[0])
    value_shape = ops.convert_to_tensor(value_t).get_shape()
    # Output shape: [len(values)] + shape of one element.
    output_shape = tensor_shape.TensorShape([len(values)])
    output_shape = output_shape.concatenate(value_shape)
    # expand_dims converts concat to stack.
    return gen_array_ops.parallel_concat(
        [expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
  """Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor along `axis`."""
  if axis == 0:
    try:
      # If the input is a constant list, it can be converted to a constant op
      return ops.convert_to_tensor(values, name=name)
    except (TypeError, ValueError):
      pass  # Input list contains non-constant tensors
  value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple()  # pylint: disable=protected-access
  if value_shape is not None:
    # Validate `axis` against the rank of the stacked result.
    expanded_num_dims = len(value_shape) + 1
    if axis < -expanded_num_dims or axis >= expanded_num_dims:
      raise ValueError("axis = %d not in [%d, %d)" %
                       (axis, -expanded_num_dims, expanded_num_dims))
  return gen_array_ops.pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
  """Converts a possibly-nested list/tuple containing tensors to a tensor.

  Non-tensor leaves are converted to constants of `dtype`; if the structure
  contains no tensors at this level, the (converted) list is returned as-is.
  """
  if context.executing_eagerly():
    # NOTE: Fast path when all the items are tensors; no conversion needed.
    if all(isinstance(elem, core.Tensor) for elem in list_or_tuple):
      return gen_array_ops.pack(list_or_tuple, name=name)
  must_pack = False
  converted_elems = []
  with ops.name_scope(name) as scope:
    for i, elem in enumerate(list_or_tuple):
      if isinstance(elem, core.Tensor):
        if dtype is not None and elem.dtype.base_dtype != dtype:
          raise TypeError("Cannot convert a list containing a tensor of dtype "
                          "%s to %s (Tensor is: %r)" %
                          (elem.dtype, dtype, elem))
        converted_elems.append(elem)
        must_pack = True
      elif isinstance(elem, (list, tuple)):
        # Recurse into nested sequences.
        converted_elem = _autopacking_helper(elem, dtype, str(i))
        if isinstance(converted_elem, core.Tensor):
          must_pack = True
        converted_elems.append(converted_elem)
      else:
        converted_elems.append(elem)
    if must_pack:
      # At least one tensor found: turn the remaining leaves into constants
      # and pack everything into a single tensor.
      elems_as_tensors = []
      for i, elem in enumerate(converted_elems):
        if isinstance(elem, core.Tensor):
          elems_as_tensors.append(elem)
        else:
          elems_as_tensors.append(
              constant_op.constant(elem, dtype=dtype, name=str(i)))
      return gen_array_ops.pack(elems_as_tensors, name=scope)
    else:
      return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
  """Returns the base dtype of the first `core.Tensor` found in the nested
  list/tuple, or None when no tensor is present."""
  for item in list_or_tuple:
    if isinstance(item, core.Tensor):
      return item.dtype.base_dtype
    if isinstance(item, (list, tuple)):
      nested_dtype = _get_dtype_from_nested_lists(item)
      if nested_dtype is not None:
        return nested_dtype
  return None
def _cast_nested_seqs_to_dtype(dtype):
  """Returns a callable that casts `core.Tensor` leaves to `dtype`."""
  def _maybe_cast(value):
    # Non-tensor leaves pass through untouched; tensors of a different base
    # dtype are cast.
    if isinstance(value, core.Tensor) and value.dtype.base_dtype != dtype:
      return gen_math_ops.cast(value, dtype)
    return value
  return _maybe_cast
# Plain scalar Python/NumPy types (and ndarray) whose presence alone never
# triggers auto-packing into a tensor.
_NON_AUTOPACKABLE_TYPES = set(np.core.numerictypes.ScalarType)
_NON_AUTOPACKABLE_TYPES.add(np.ndarray)
def _should_not_autopack(v):
  """True when every flattened leaf of `v` is a plain scalar/ndarray type."""
  leaves = nest.flatten(v)
  return all(type(leaf) in _NON_AUTOPACKABLE_TYPES for leaf in leaves)
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
  """Tensor-conversion function that auto-packs lists/tuples of tensors."""
  if as_ref or _should_not_autopack(v):
    return NotImplemented
  inferred_dtype = _get_dtype_from_nested_lists(v)
  if inferred_dtype is None:
    # There is no tensor in the nested structure; defer to other converters.
    return NotImplemented
  if dtype is None:
    dtype = inferred_dtype
  elif dtype != inferred_dtype:
    # Cast tensor leaves so the packed result has the requested dtype.
    v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
  return _autopacking_helper(v, dtype, name or "packed")
# Register auto-packing conversion for lists/tuples at priority 99.
ops.register_tensor_conversion_function((list, tuple),
                                        _autopacking_conversion_function, 99)
@tf_export("unstack")
@dispatch.add_dispatch_support
def unstack(value, num=None, axis=0, name="unstack"):
  """Unpacks `value` along `axis` into a list of `num` tensors.

  When `num` is not given it is inferred from the static shape; a
  ValueError is raised if it cannot be inferred or `axis` is out of range.
  """
  if num is None:
    value = ops.convert_to_tensor(value)
    value_shape = value.get_shape()
    if value_shape.ndims is not None:
      if axis < -value_shape.ndims or axis >= value_shape.ndims:
        raise ValueError("axis = %d not in [%d, %d)" %
                         (axis, -value_shape.ndims, value_shape.ndims))
      num = value_shape.dims[axis].value
    if num is None:
      raise ValueError("Cannot infer num from shape %s" % value_shape)
  return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
  """Concatenates the list of tensors `values` along dimension `axis`."""
  if not isinstance(values, (list, tuple)):
    values = [values]
  if len(values) == 1:
    # Degenerate single-tensor case: still validate that `axis` is a scalar
    # and keep the requested name via an identity op.
    with ops.name_scope(name) as scope:
      ops.convert_to_tensor(
          axis, name="concat_dim",
          dtype=dtypes.int32).get_shape().assert_has_rank(0)
      return identity(values[0], name=name)
  return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
@tf_export(v1=["boolean_mask"])
@dispatch.add_dispatch_support
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
  """Keeps the entries of `tensor` where boolean `mask` is True.

  `mask`'s K dimensions must be compatible with the K dimensions of
  `tensor`'s shape starting at `axis` (default 0).
  """
  def _apply_mask_1d(reshaped_tensor, mask, axis=None):
    """Masks a flattened tensor by gathering the True indices."""
    indices = squeeze(where_v2(mask), axis=[1])
    return gather(reshaped_tensor, indices, axis=axis)
  with ops.name_scope(name, values=[tensor, mask]):
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    mask = ops.convert_to_tensor(mask, name="mask")
    shape_mask = mask.get_shape()
    ndims_mask = shape_mask.ndims
    shape_tensor = tensor.get_shape()
    if ndims_mask == 0:
      raise ValueError("mask cannot be scalar.")
    if ndims_mask is None:
      raise ValueError(
          "Number of mask dimensions must be specified, even if some dimensions"
          " are None. E.g. shape=[None] is ok, but shape=None is not.")
    axis = 0 if axis is None else axis
    axis_value = tensor_util.constant_value(axis)
    if axis_value is not None:
      # Static axis: check compatibility of the masked dimensions up front.
      axis = axis_value
      shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
    leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
    # Flatten the `ndims_mask` masked dimensions into a single dimension.
    tensor = reshape(
        tensor,
        concat([
            shape(tensor)[:axis], [leading_size],
            shape(tensor)[axis + ndims_mask:]
        ], 0))
    if axis_value is not None:
      # Preserve as much static shape information as possible.
      first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
      tensor.set_shape(
          tensor_shape.as_shape(shape_tensor[:axis]).concatenate(
              [first_dim]).concatenate(shape_tensor[axis + ndims_mask:]))
    mask = reshape(mask, [-1])
    return _apply_mask_1d(tensor, mask, axis)
@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
  """V2 endpoint for `boolean_mask`; only the argument order differs."""
  return boolean_mask(tensor=tensor, mask=mask, name=name, axis=axis)
@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
  """Masks elements of `IndexedSlices` `a`, dropping rows in `mask_indices`."""
  with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
    indices = a.indices
    # `out_indices` are the surviving indices; `to_gather` their positions.
    out_indices, to_gather = gen_array_ops.list_diff(indices, mask_indices)
    out_values = gather(a.values, to_gather, name=name)
    return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
@dispatch.add_dispatch_support
def unique(x, out_idx=dtypes.int32, name=None):
  # Thin wrapper; its public docstring is copied from the generated op below.
  return gen_array_ops.unique(x, out_idx, name)
unique.__doc__ = gen_array_ops.unique.__doc__
@tf_export("unique_with_counts")
@dispatch.add_dispatch_support
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
  # Thin wrapper; its public docstring is copied from the generated op below.
  return gen_array_ops.unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
@tf_export("split")
@dispatch.add_dispatch_support
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
  """Splits `value` into sub-tensors along `axis`.

  An integral `num_or_size_splits` yields that many equal splits; a 1-D
  tensor/sequence of sizes yields splits of those sizes.  `num` supplies the
  split count when it cannot be inferred from the static shape of
  `num_or_size_splits`.
  """
  if isinstance(num_or_size_splits,
                (numbers.Integral, tensor_shape.Dimension)):
    return gen_array_ops.split(
        axis=axis, num_split=num_or_size_splits, value=value, name=name)
  size_splits = ops.convert_to_tensor(num_or_size_splits)
  if size_splits._rank() == 0:
    raise ValueError(
        "Rank-0 tensors are not supported as the num_or_size_splits argument "
        "to split. Argument provided: %s" % (num_or_size_splits,))
  if num is None:
    # Try to read the split count from the static shape of `size_splits`.
    size_splits_shape = size_splits._shape_tuple()
    if size_splits_shape:
      num = size_splits_shape[0]
    if num is None:
      raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
  return gen_array_ops.split_v(
      value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
@tf_export("transpose", v1=[])
@dispatch.add_dispatch_support
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
  """V2 endpoint for `transpose`; only the argument order differs from V1."""
  return transpose(a=a, name=name, perm=perm, conjugate=conjugate)
@tf_export(v1=["transpose"])
@dispatch.add_dispatch_support
def transpose(a, perm=None, name="transpose", conjugate=False):
  """Transposes `a` according to `perm` (default: reverse all dimensions).

  When `conjugate` is True and `a` is complex, the conjugate-transpose op
  is used instead.
  """
  with ops.name_scope(name, "transpose", [a]) as name:
    if not tensor_util.is_tf_type(a):
      a = ops.convert_to_tensor(a, name="a")
    if conjugate and a.dtype.is_complex:
      transpose_fn = gen_array_ops.conjugate_transpose
    else:
      transpose_fn = gen_array_ops.transpose
    if perm is not None:
      return transpose_fn(a, perm, name=name)
    rank = a.shape.rank
    if rank is None:
      # Rank unknown statically: build the reversed permutation dynamically.
      perm = gen_math_ops._range(gen_array_ops.rank(a) - 1, -1, -1)
    else:
      perm = np.arange(rank - 1, -1, -1, dtype=np.int32)
    return transpose_fn(a, perm, name=name)
@tf_export(
    "linalg.matrix_transpose",
    v1=["linalg.transpose", "linalg.matrix_transpose", "matrix_transpose"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("matrix_transpose", "linalg.transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
  """Transposes the last two dimensions of (batch) matrix `a`."""
  with ops.name_scope(name, values=[a]):
    a = ops.convert_to_tensor(a, name="a")
    a_shape = a.get_shape()
    ndims = a_shape.ndims
    if ndims is not None:
      if ndims < 2:
        raise ValueError(
            "Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
            "%s" % a_shape)
      # Static rank: keep batch dims, swap the last two.
      perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
    else:
      # Dynamic rank: build the same permutation with ops.
      a_rank = rank(a)
      perm = concat(
          (gen_math_ops._range(0, a_rank - 2, 1), [a_rank - 1, a_rank - 2]), 0)
    return transpose(a, perm=perm, conjugate=conjugate)
@tf_export("linalg.diag", v1=["linalg.diag", "matrix_diag"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("matrix_diag")
def matrix_diag(diagonal,
                name="diag",
                k=0,
                num_rows=-1,
                num_cols=-1,
                padding_value=0,
                align="RIGHT_LEFT"):
  """Returns a batched diagonal tensor with the given (banded) diagonals."""
  # Special case: match `padding_value`'s type when the diagonal is bool.
  if hasattr(diagonal, "dtype") and diagonal.dtype == "bool":
    padding_value = bool(padding_value)
  return gen_array_ops.matrix_diag_v3(
      diagonal=diagonal,
      k=k,
      num_rows=num_rows,
      num_cols=num_cols,
      padding_value=padding_value,
      align=align,
      name=name)
@tf_export("linalg.diag_part", v1=["linalg.diag_part", "matrix_diag_part"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("matrix_diag_part")
def matrix_diag_part(
    input,
    name="diag_part",
    k=0,
    padding_value=0,
    align="RIGHT_LEFT"):
  """Returns the (banded) diagonal part of a batched matrix `input`."""
  # Special case: match `padding_value`'s type when the input is bool.
  if hasattr(input, "dtype") and input.dtype == "bool":
    padding_value = bool(padding_value)
  return gen_array_ops.matrix_diag_part_v3(
      input=input, k=k, padding_value=padding_value, align=align, name=name)
@tf_export(
    "linalg.tensor_diag_part", v1=["linalg.tensor_diag_part", "diag_part"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("diag_part")
def tensor_diag_part(input, name=None):
  """Returns the diagonal part of tensor `input` via the DiagPart op."""
  return gen_array_ops.diag_part(input=input, name=name)
@tf_export("linalg.set_diag", v1=["linalg.set_diag", "matrix_set_diag"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("matrix_set_diag")
def matrix_set_diag(
    input,
    diagonal,
    name="set_diag",
    k=0,
    align="RIGHT_LEFT"):
  """Returns `input` with its (banded) diagonal replaced by `diagonal`."""
  return gen_array_ops.matrix_set_diag_v3(
      input=input, diagonal=diagonal, k=k, align=align, name=name)
def _constant_if_small(value, shape, dtype, name):
  """Returns a constant of `value` when `shape` has < 1000 elements, else None.

  Returning None tells the caller to fall back to a Fill op.
  """
  try:
    if np.prod(shape) < 1000:
      return constant(value, shape=shape, dtype=dtype, name=name)
  except (NotImplementedError, TypeError):
    # np.prod (or constant) can fail, e.g. when `shape` contains tensors.
    pass
  return None
def _tag_zeros_tensor(fun):
  """Decorator that tags the returned tensor with `_is_zeros_tensor = True`."""
  def wrapped(*args, **kwargs):
    tensor = fun(*args, **kwargs)
    # Consumers elsewhere presumably check this tag to detect all-zeros
    # tensors cheaply -- verify against callers.
    tensor._is_zeros_tensor = True
    return tensor
  return tf_decorator.make_decorator(fun, wrapped)
@tf_export("zeros")
@dispatch.add_dispatch_support
@_tag_zeros_tensor
def zeros(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor of shape `shape` with all elements set to zero."""
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "zeros", [shape]) as name:
    # Pick the dtype-appropriate zero element.
    if dtype == dtypes.bool:
      zero = False
    elif dtype == dtypes.string:
      zero = ""
    elif dtype.is_quantized:
      zero = np.zeros([]).astype(dtype.as_numpy_dtype)
    else:
      zero = 0
    if not isinstance(shape, ops.Tensor):
      try:
        if not context.executing_eagerly():
          # Emit a constant only when small; otherwise fall through to a Fill
          # op to prevent serialized GraphDefs from becoming too large.
          output = _constant_if_small(zero, shape, dtype, name)
          if output is not None:
            return output
        # Go through tensor shapes to get int64-if-needed semantics
        shape = constant_op._tensor_shape_tensor_conversion_function(
            tensor_shape.TensorShape(shape))
      except (TypeError, ValueError, errors.UnimplementedError):
        # Happens when shape is a list with tensor elements
        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
    if not shape._shape_tuple():
      shape = reshape(shape, [-1])  # Ensure it's a vector
    output = fill(shape, constant(zero, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output
@tf_export(v1=["zeros_like"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def zeros_like(tensor, dtype=None, name=None, optimize=True):
  """Returns a tensor of zeros with the same shape as `tensor` (V1 API)."""
  return zeros_like_impl(tensor, dtype, name, optimize)
@tf_export("zeros_like", v1=[])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def zeros_like_v2(input, dtype=None, name=None):
  """Returns a tensor of zeros with the same shape as `input` (V2 API)."""
  return zeros_like_impl(input, dtype, name, optimize=True)
@_tag_zeros_tensor
def zeros_like_impl(tensor, dtype, name, optimize=True):
  """Shared implementation of `zeros_like`.

  NOTE: the local `tensor_shape` shadows the `tensor_shape` module within
  this function.
  """
  with ops.name_scope(name, "zeros_like", [tensor]) as name:
    if not tensor_util.is_tf_type(tensor):
      tensor = ops.convert_to_tensor(tensor, name="tensor")
    tensor_shape = tensor.shape
    tensor_dtype = tensor.dtype
    if context.executing_eagerly():
      if dtype is not None and dtype != tensor_dtype:
        return zeros(
            shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
      return gen_array_ops.zeros_like(tensor, name=name)
    # Graph mode: with a fully-known shape (and non-variant dtype) build
    # zeros directly from the static shape.
    if (optimize and tensor_shape.is_fully_defined() and
        tensor_dtype != dtypes.variant):
      return zeros(tensor_shape, dtype=dtype or tensor_dtype, name=name)
    if dtype is not None and dtype != tensor_dtype and dtype != dtypes.variant:
      return zeros(
          shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
    else:
      return gen_array_ops.zeros_like(tensor, name=name)
@tf_export(v1=["ones_like"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def ones_like(tensor, dtype=None, name=None, optimize=True):
  """Returns a tensor of ones with the same shape as `tensor` (V1 API)."""
  return ones_like_impl(tensor, dtype, name, optimize)
@tf_export("ones_like", v1=[])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def ones_like_v2(input, dtype=None, name=None):
  """Returns a tensor of ones with the same shape as `input` (V2 API)."""
  return ones_like_impl(input, dtype, name, optimize=True)
def ones_like_impl(tensor, dtype, name, optimize=True):
  """Shared implementation of `ones_like`."""
  with ops.name_scope(name, "ones_like", [tensor]) as name:
    tensor = ops.convert_to_tensor(tensor, name="tensor")
    ones_shape = shape_internal(tensor, optimize=optimize)
    if dtype is None:
      dtype = tensor.dtype
    ret = ones(ones_shape, dtype=dtype, name=name)
    if not context.executing_eagerly():
      # Carry over the input's static shape for graph-mode shape inference.
      ret.set_shape(tensor.get_shape())
    return ret
@tf_export("ones")
@dispatch.add_dispatch_support
def ones(shape, dtype=dtypes.float32, name=None):
  """Creates a tensor of shape `shape` with all elements set to one."""
  dtype = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "ones", [shape]) as name:
    # Pick the dtype-appropriate one element.
    if dtype == dtypes.bool:
      one = True
    elif dtype.is_quantized:
      one = np.ones([]).astype(dtype.as_numpy_dtype)
    else:
      one = 1
    if not isinstance(shape, ops.Tensor):
      try:
        if not context.executing_eagerly():
          # Emit a constant only when small; otherwise fall through to a Fill
          # op to prevent serialized GraphDefs from becoming too large.
          output = _constant_if_small(one, shape, dtype, name)
          if output is not None:
            return output
        # Go through tensor shapes to get int64-if-needed semantics
        shape = constant_op._tensor_shape_tensor_conversion_function(
            tensor_shape.TensorShape(shape))
      except (TypeError, ValueError):
        # Happens when shape is a list with tensor elements
        shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
    if not shape._shape_tuple():
      shape = reshape(shape, [-1])  # Ensure it's a vector
    output = fill(shape, constant(one, dtype=dtype), name=name)
  assert output.dtype.base_dtype == dtype
  return output
@tf_export(v1=["placeholder"])
def placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a tensor that must be fed (graph mode only)."""
  if context.executing_eagerly():
    raise RuntimeError("tf.placeholder() is not compatible with "
                       "eager execution.")
  return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
@tf_export(v1=["placeholder_with_default"])
def placeholder_with_default(input, shape, name=None):  # pylint: disable=redefined-builtin
  """A placeholder op that passes through `input` when no value is fed."""
  return gen_array_ops.placeholder_with_default(
      input=input, shape=shape, name=name)
@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
@deprecation.deprecated_endpoints("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
  """Inserts a placeholder for a `SparseTensor` that must be fed.

  Graph mode only; builds placeholders for values, indices and dense_shape.
  """
  if context.executing_eagerly():
    raise RuntimeError("`sparse_placeholder` is not compatible with "
                       "eager execution.")
  shape_name = (name + "/shape") if name is not None else None
  default_shape_name = (name + "/shape_default") if name is not None else None
  if shape is None:
    # Unknown shape/rank: feed the dense shape at run time.
    rank = None
    dense_shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
    dense_shape_default = tensor_util.constant_value_as_shape(dense_shape)
  else:
    if isinstance(shape, ops.Tensor):
      rank = shape.get_shape()[0]
      dense_shape_default = tensor_util.constant_value_as_shape(shape)
    else:
      rank = len(shape)
      # Map unknown (None) dims to -1 for the int64 shape tensor, keeping a
      # partial TensorShape as the static default.
      dense_shape_default = tensor_shape.TensorShape(
          tuple(None if dim == -1 else dim for dim in shape))
      shape = tuple(tensor_shape.dimension_value(dim) for dim in shape)
      shape = tuple(-1 if dim is None else dim for dim in shape)
      shape = ops.convert_to_tensor(
          shape, dtype=dtypes.int64, name=default_shape_name)
    dense_shape = placeholder_with_default(
        shape, shape=shape.shape, name=shape_name)
  result = sparse_tensor.SparseTensor(
      values=placeholder(
          dtype,
          shape=[None],
          name=(name + "/values") if name is not None else None),
      indices=placeholder(
          dtypes.int64,
          shape=[None, rank],
          name=(name + "/indices") if name is not None else None),
      dense_shape=dense_shape)
  # The SparseTensor constructor derives only the (possibly fully unknown)
  # default shape out of the placeholder. Override that
  # shape to be the value determined here, so partial shapes can be
  # propagated.
  result._dense_shape_default = dense_shape_default
  return result
# pylint: enable=redefined-outer-name
@tf_export("pad", v1=[])
@dispatch.add_dispatch_support
def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
  """Pads a tensor (TF-2 argument order); delegates to the v1 `pad`."""
  return pad(
      tensor=tensor,
      paddings=paddings,
      mode=mode,
      name=name,
      constant_values=constant_values)
@tf_export(v1=["pad"])
@dispatch.add_dispatch_support
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0):  # pylint: disable=invalid-name
  """Pads `tensor` according to `paddings` and `mode`.

  Args:
    tensor: A `Tensor`.
    paddings: A `Tensor` of type int32/int64 with shape `[rank(tensor), 2]`.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive).
    name: A name for the operation (optional).
    constant_values: The scalar pad value to use in "CONSTANT" mode.

  Returns:
    A `Tensor` with the same type as `tensor`.

  Raises:
    ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
  """
  # Convert lower/mixed case to upper for NumPy compatibility
  # NumPy uses all lower-case modes.
  mode = mode.upper()
  if mode == "CONSTANT":
    # TODO(rjryan): Once the forward compatibility period (3 weeks) have passed
    # remove the "Pad" fallback here.
    if not tensor_util.is_tf_type(constant_values) and constant_values == 0:
      result = gen_array_ops.pad(tensor, paddings, name=name)
    else:
      result = gen_array_ops.pad_v2(
          tensor, paddings, constant_values, name=name)
  elif mode == "REFLECT":
    result = gen_array_ops.mirror_pad(
        tensor, paddings, mode="REFLECT", name=name)
  elif mode == "SYMMETRIC":
    result = gen_array_ops.mirror_pad(
        tensor, paddings, mode="SYMMETRIC", name=name)
  else:
    raise ValueError("Unknown padding mode: %s" % mode)
  # Restore shape information where possible.
  if not context.executing_eagerly():
    paddings_constant = _get_paddings_constant(paddings)
    input_shape = (
        tensor_shape.TensorShape(tensor.shape)
        if isinstance(tensor, ops.Tensor) else result.op.inputs[0].shape)
    if (input_shape.ndims is not None and
        not result.shape.is_fully_defined() and paddings_constant is not None):
      new_shape = []
      for padding, dim in zip(paddings_constant, input_shape.as_list()):
        # A dimension stays unknown if either the input size or any of its
        # two pad amounts is unknown.
        if padding is None or dim is None or any((x is None for x in padding)):
          new_shape.append(None)
        else:
          new_shape.append(sum(padding) + dim)
      result.set_shape(new_shape)
  return result
def _get_paddings_constant(paddings):
  """Resolves `paddings` to a (possibly partial) constant value.

  Tensors are resolved via `tensor_util.constant_value` (partial allowed);
  lists and tuples are resolved recursively; anything else passes through.
  """
  if isinstance(paddings, ops.Tensor):
    return tensor_util.constant_value(paddings, partial=True)
  if isinstance(paddings, (list, tuple)):
    return [_get_paddings_constant(item) for item in paddings]
  return paddings
@tf_export("meshgrid")
@dispatch.add_dispatch_support
def meshgrid(*args, **kwargs):
  """Broadcasts parameters for evaluation on an N-D grid.

  Given N one-dimensional coordinate arrays `*args`, returns a list of N-D
  coordinate arrays for evaluating expressions on an N-D grid.

  Args:
    *args: `Tensor`s with rank 1.
    **kwargs:
      - indexing: Either 'xy' (Cartesian, default) or 'ij' (matrix) indexing.
      - name: A name for the operation (optional).

  Returns:
    A list of N `Tensor`s of rank N.

  Raises:
    TypeError: When no keyword arguments (kwargs) are passed.
    ValueError: When indexing keyword argument is not one of `xy` or `ij`.
  """
  indexing = kwargs.pop("indexing", "xy")
  name = kwargs.pop("name", "meshgrid")
  if kwargs:
    key = list(kwargs.keys())[0]
    raise TypeError("'{}' is an invalid keyword argument "
                    "for this function".format(key))
  if indexing not in ("xy", "ij"):
    raise ValueError("indexing parameter must be either 'xy' or 'ij'")
  with ops.name_scope(name, "meshgrid", args) as name:
    ndim = len(args)
    s0 = (1,) * ndim
    if not ndim:
      return []
    # Prepare reshape by inserting dimensions with size 1 where needed
    output = []
    for i, x in enumerate(args):
      output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
    # Create parameters for broadcasting each tensor to the full size
    shapes = [size(x) for x in args]
    output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
    if indexing == "xy" and ndim > 1:
      # Cartesian indexing swaps the roles of the first two axes.
      output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
      output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
      shapes[0], shapes[1] = shapes[1], shapes[0]
    # TODO(nolivia): improve performance with a broadcast
    mult_fact = ones(shapes, output_dtype)
    return [x * mult_fact for x in output]
# Sentinel values consumed by the strided-slice helpers below.
# NOTE(review): NEW_AXIS appears to mark an inserted axis and SHRINK_AXIS a
# removed (scalar-indexed) axis -- confirm at the call sites outside this
# chunk.
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
unknown = None # Document what None means here.
use_full_range = None # Document other use of None.
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
stride = spec.step
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
def _TileGradShape(op):
  """Shape function for the TileGrad op."""
  multiples_shape = op.inputs[1].get_shape().with_rank(1)
  input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
  # NOTE(mrry): `multiples` is represented as a `TensorShape` because (i) it
  # is a vector of non-negative integers, and (ii) doing so allows us to
  # handle partially-known multiples.
  multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
      input_shape.ndims)
  if multiples.ndims is None:
    return [tensor_shape.unknown_shape()]
  return [
      tensor_shape.TensorShape([
          dim // multiple
          for dim, multiple in zip(input_shape.dims, multiples.dims)
      ])
  ]
@tf_export("edit_distance")
@dispatch.add_dispatch_support
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
  """Computes the Levenshtein distance between sparse sequences."""
  sparse_kinds = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
  if not isinstance(hypothesis, sparse_kinds):
    raise TypeError("Hypothesis must be a SparseTensor.")
  if not isinstance(truth, sparse_kinds):
    raise TypeError("Truth must be a SparseTensor.")
  return gen_array_ops.edit_distance(
      hypothesis.indices,
      hypothesis.values,
      hypothesis.dense_shape,
      truth.indices,
      truth.values,
      truth.dense_shape,
      normalize=normalize,
      name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxArgs op."""
  attrs = {
      "min": op.get_attr("min"),
      "max": op.get_attr("max"),
      "num_bits": op.get_attr("num_bits"),
      "narrow_range": op.get_attr("narrow_range"),
  }
  return fake_quant_with_min_max_args_gradient(grad, op.inputs[0], **attrs)
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxVars op."""
  inputs = op.inputs[0]
  min_var = op.inputs[1]
  max_var = op.inputs[2]
  return fake_quant_with_min_max_vars_gradient(
      grad,
      inputs,
      min_var,
      max_var,
      num_bits=op.get_attr("num_bits"),
      narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
  """Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
  inputs = op.inputs[0]
  min_var = op.inputs[1]
  max_var = op.inputs[2]
  return fake_quant_with_min_max_vars_per_channel_gradient(
      grad,
      inputs,
      min_var,
      max_var,
      num_bits=op.get_attr("num_bits"),
      narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("QuantizeAndDequantizeV4")
def _QuantizeAndDequantizeV4Grad(op, grad):
  """Gradient for QuantizeAndDequantizeV4 op."""
  x = op.inputs[0]
  input_min = op.inputs[1]
  input_max = op.inputs[2]
  return quantize_and_dequantize_v4_grad(
      grad, x, input_min, input_max, axis=op.get_attr("axis"))
@ops.RegisterGradient("QuantizeAndDequantizeV4Grad")
def _QuantizeAndDequantizeV4GradGrad(op, grad):
  # Second-order gradient: reuses the first-order gradient implementation.
  return _QuantizeAndDequantizeV4Grad(op, grad)
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
                                     block_shape,
                                     base_paddings=None,
                                     name=None):
  """Calculates paddings that make `block_shape` divide the padded shape.

  Args:
    input_shape: int32 Tensor of shape [N].
    block_shape: int32 Tensor of shape [N] with fully-defined static value.
    base_paddings: Optional int32 Tensor of shape [N, 2]; minimum amount of
      padding to apply. Defaults to zeros.
    name: A name for the operation (optional).

  Returns:
    A `(paddings, crops)` pair of int32 Tensors of shape [N, 2]: `paddings`
    extends `base_paddings` so each padded dimension is a multiple of
    `block_shape`, and `crops` records the extra amount added (to be cropped
    back out by batch_to_space).
  """
  with ops.name_scope(name, "required_space_to_batch_paddings",
                      [input_shape, block_shape]):
    input_shape = ops.convert_to_tensor(
        input_shape, dtype=dtypes.int32, name="input_shape")
    block_shape = ops.convert_to_tensor(
        block_shape, dtype=dtypes.int32, name="block_shape")
    block_shape.get_shape().assert_is_fully_defined()
    block_shape.get_shape().assert_has_rank(1)
    num_block_dims = block_shape.get_shape().dims[0].value
    if num_block_dims == 0:
      # Degenerate case: no block dimensions means no padding and no crops.
      return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
    input_shape.get_shape().assert_is_compatible_with([num_block_dims])
    if base_paddings is not None:
      base_paddings = ops.convert_to_tensor(
          base_paddings, dtype=dtypes.int32, name="base_paddings")
      base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
    else:
      base_paddings = zeros([num_block_dims, 2], dtypes.int32)
    # When all three inputs are statically known, fold the arithmetic into
    # constants instead of emitting ops.
    const_block_shape = tensor_util.constant_value(block_shape)
    const_input_shape = tensor_util.constant_value(input_shape)
    const_base_paddings = tensor_util.constant_value(base_paddings)
    if (const_block_shape is not None and const_input_shape is not None and
        const_base_paddings is not None):
      block_shape = const_block_shape
      input_shape = const_input_shape
      base_paddings = const_base_paddings
    # Use same expression for both constant and non-constant case.
    pad_start = base_paddings[:, 0]
    orig_pad_end = base_paddings[:, 1]
    full_input_shape = input_shape + pad_start + orig_pad_end
    pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
    pad_end = orig_pad_end + pad_end_extra
    result_paddings = stack(
        [[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
        name="paddings")
    result_crops = stack([[0, pad_end_extra[i]] for i in range(num_block_dims)],
                         name="crops")
    return result_paddings, result_crops
@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("space_to_batch")
def space_to_batch(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    paddings,
    block_size=None,
    name=None,
    block_shape=None):  # pylint: disable=redefined-builtin
  """V1 SpaceToBatch: 2-D block case of `space_to_batch_nd`."""
  chosen_block_size = deprecation.deprecated_argument_lookup(
      "block_shape", block_shape, "block_size", block_size)
  # The 2-D op is expressed via the N-D op with a rank-2 block shape.
  result = space_to_batch_nd(
      input,
      paddings=paddings,
      block_shape=np.array([chosen_block_size, chosen_block_size],
                           dtype=np.int64),
      name=name)
  result.set_shape(result.get_shape().with_rank(4))
  return result
space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
@dispatch.add_dispatch_support
def space_to_batch_v2(input, block_shape, paddings, name=None):  # pylint: disable=redefined-builtin
  """TF-2 endpoint for SpaceToBatchND; delegates to `space_to_batch_nd`."""
  return space_to_batch_nd(
      input=input, block_shape=block_shape, paddings=paddings, name=name)
space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
  """V1 endpoint for SpaceToDepth; forwards to the generated op."""
  return gen_array_ops.space_to_depth(
      input=input, block_size=block_size, data_format=data_format, name=name)
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("nn.space_to_depth", v1=[])
@dispatch.add_dispatch_support
def space_to_depth_v2(input, block_size, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
  """TF-2 endpoint for SpaceToDepth; forwards to the generated op."""
  return gen_array_ops.space_to_depth(
      input=input, block_size=block_size, data_format=data_format, name=name)
space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"):  # pylint: disable=redefined-builtin
  """V1 endpoint for DepthToSpace; forwards to the generated op."""
  return gen_array_ops.depth_to_space(
      input=input, block_size=block_size, data_format=data_format, name=name)
depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("nn.depth_to_space", v1=[])
@dispatch.add_dispatch_support
def depth_to_space_v2(input, block_size, data_format="NHWC", name=None):  # pylint: disable=redefined-builtin
  """TF-2 endpoint for DepthToSpace; forwards to the generated op."""
  return gen_array_ops.depth_to_space(
      input=input, block_size=block_size, data_format=data_format, name=name)
depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export(v1=["batch_to_space"])
@dispatch.add_dispatch_support
def batch_to_space(input, crops, block_size, name=None, block_shape=None):  # pylint: disable=redefined-builtin,missing-docstring
  """V1 BatchToSpace: 2-D block case of `batch_to_space_nd`."""
  chosen_block_size = deprecation.deprecated_argument_lookup(
      "block_shape", block_shape, "block_size", block_size)
  # The 2-D op is expressed via the N-D op with a rank-2 block shape.
  result = batch_to_space_nd(
      input,
      crops=crops,
      block_shape=np.array([chosen_block_size, chosen_block_size],
                           dtype=np.int64),
      name=name)
  result.set_shape(result.get_shape().with_rank(4))
  return result
batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
@tf_export("batch_to_space", v1=[])
@dispatch.add_dispatch_support
def batch_to_space_v2(input, block_shape, crops, name=None):  # pylint: disable=redefined-builtin
  """TF-2 BatchToSpace; an int block_shape is applied to both spatial dims."""
  resolved_block_shape = (
      np.array([block_shape, block_shape], dtype=np.int64)
      if isinstance(block_shape, int) else block_shape)
  return batch_to_space_nd(
      input=input, block_shape=resolved_block_shape, crops=crops, name=name)
@tf_export("one_hot")
@dispatch.add_dispatch_support
def one_hot(indices,
            depth,
            on_value=None,
            off_value=None,
            axis=None,
            dtype=None,
            name=None):
  """Returns a one-hot tensor.

  Args:
    indices: A `Tensor` of indices.
    depth: A scalar defining the depth of the one hot dimension.
    on_value: A scalar defining the value to fill in output when
      `indices[j] = i` (default: 1).
    off_value: A scalar defining the value to fill in output when
      `indices[j] != i` (default: 0).
    axis: The axis to fill (default: -1, a new inner-most axis).
    dtype: The data type of the output tensor.
    name: A name for the operation (optional).

  Returns:
    The one-hot tensor.

  Raises:
    TypeError: If the dtypes of `on_value`, `off_value` and/or `dtype` are
      inconsistent with each other.
  """
  with ops.name_scope(
      name, "one_hot",
      [indices, depth, on_value, off_value, axis, dtype]) as name:
    on_exists = on_value is not None
    off_exists = off_value is not None
    if on_exists:
      on_value = ops.convert_to_tensor(on_value, dtype_hint=dtype)
    if off_exists:
      off_value = ops.convert_to_tensor(off_value, dtype_hint=dtype)
    on_dtype = on_value.dtype.base_dtype if on_exists else None
    off_dtype = off_value.dtype.base_dtype if off_exists else None
    if on_exists or off_exists:
      if dtype is not None:
        # Ensure provided on_value and/or off_value match dtype
        if on_exists and on_dtype != dtype:
          raise TypeError("dtype {0} of on_value does not match "
                          "dtype parameter {1}".format(on_dtype, dtype))
        if off_exists and off_dtype != dtype:
          raise TypeError("dtype {0} of off_value does not match "
                          "dtype parameter {1}".format(off_dtype, dtype))
      else:
        # dtype not provided: automatically assign it
        dtype = on_dtype if on_exists else off_dtype
    elif dtype is None:
      # None of on_value, off_value, or dtype provided. Default dtype to float32
      dtype = dtypes.float32
    if not on_exists:
      # on_value not provided: assign to value 1 of type dtype
      on_value = ops.convert_to_tensor(1, dtype, name="on_value")
      on_dtype = dtype
    if not off_exists:
      # off_value not provided: assign to value 0 of type dtype
      off_value = ops.convert_to_tensor(0, dtype, name="off_value")
      off_dtype = dtype
    if on_dtype != off_dtype:
      raise TypeError("dtype {0} of on_value does not match "
                      "dtype {1} of off_value".format(on_dtype, off_dtype))
    return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
                                 name)
def _all_dimensions(x):
  """Returns a 1-D tensor listing every dimension index of `x`."""
  # Fast path: when the rank is statically known, emit a constant instead of
  # Rank and Range ops.
  if isinstance(x, ops.Tensor):
    ndims = x.get_shape().ndims
    if ndims is not None:
      return constant_op.constant(np.arange(ndims), dtype=dtypes.int32)
  if (isinstance(x, sparse_tensor.SparseTensor) and
      x.dense_shape.get_shape().is_fully_defined()):
    r = x.dense_shape.get_shape().dims[0].value  # sparse.dense_shape is 1-D.
    return constant_op.constant(np.arange(r), dtype=dtypes.int32)
  # Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
  return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
@dispatch.add_dispatch_support
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
  """Returns a mask tensor representing the first N positions of each cell.

  Args:
    lengths: integer tensor, all its values <= maxlen.
    maxlen: scalar integer tensor; size of the last dimension of the returned
      tensor. Default is the maximum value in `lengths`.
    dtype: output type of the resulting tensor.
    name: name of the op.

  Returns:
    A mask tensor of shape `lengths.shape + (maxlen,)`, cast to `dtype`.

  Raises:
    ValueError: if `maxlen` is not a scalar.
  """
  with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
    lengths = ops.convert_to_tensor(lengths)
    if maxlen is None:
      # Default maxlen: the largest length, clamped to be non-negative.
      maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
      maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
    else:
      maxlen = ops.convert_to_tensor(maxlen)
    if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
      raise ValueError("maxlen must be scalar for sequence_mask")
    # The basic idea is to compare a range row vector of size maxlen:
    # [0, 1, 2, 3, 4]
    # to length as a matrix with 1 column: [[1], [3], [2]].
    # Because of broadcasting on both arguments this comparison results
    # in a matrix of size (len(lengths), maxlen)
    row_vector = gen_math_ops._range(
        constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
    # Since maxlen >= max(lengths), it is safe to use maxlen as a cast
    # authoritative type. Whenever maxlen fits into tf.int32, so do the lengths.
    matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
    result = row_vector < matrix
    if dtype is None or result.dtype.is_compatible_with(dtype):
      return result
    else:
      return gen_math_ops.cast(result, dtype)
@tf_export(v1=["squeeze"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
  # pylint: disable=redefined-builtin
  """Removes dimensions of size 1 from the shape of a tensor (v1 API)."""
  resolved_axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "squeeze_dims", squeeze_dims)
  # The underlying op expects a list; wrap a scalar axis.
  axis_list = [resolved_axis] if np.isscalar(resolved_axis) else resolved_axis
  return gen_array_ops.squeeze(input, axis_list, name)
@tf_export("squeeze", v1=[])
@dispatch.add_dispatch_support
def squeeze_v2(input, axis=None, name=None):
  # pylint: disable=redefined-builtin
  """Removes dimensions of size 1 from the shape of a tensor (TF-2 API)."""
  return squeeze(input=input, axis=axis, name=name)
@tf_export(v1=["where"])
@dispatch.add_dispatch_support
def where(condition, x=None, y=None, name=None):
  """Returns coordinates of true elements, or elementwise selects from x/y."""
  has_x = x is not None
  has_y = y is not None
  if has_x != has_y:
    raise ValueError("x and y must both be non-None or both be None.")
  if has_x:
    return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
  with ops.name_scope(name, "Where", [condition]) as name:
    condition = ops.convert_to_tensor(
        condition, preferred_dtype=dtypes.bool, name="condition")
    return gen_array_ops.where(condition=condition, name=name)
@tf_export("where", v1=["where_v2"])
@dispatch.add_dispatch_support
def where_v2(condition, x=None, y=None, name=None):
  """Returns coordinates of true elements, or a broadcasting select of x/y."""
  has_x = x is not None
  has_y = y is not None
  if has_x != has_y:
    raise ValueError("x and y must both be non-None or both be None.")
  if has_x:
    return gen_math_ops.select_v2(condition=condition, t=x, e=y, name=name)
  with ops.name_scope(name, "Where", [condition]) as name:
    condition = ops.convert_to_tensor(
        condition, preferred_dtype=dtypes.bool, name="condition")
    return gen_array_ops.where(condition=condition, name=name)
# pylint: disable=redefined-builtin
@tf_export(v1=["reverse_sequence"])
@deprecation.deprecated_args(None,
                             "seq_dim is deprecated, use seq_axis instead",
                             "seq_dim")
@deprecation.deprecated_args(None,
                             "batch_dim is deprecated, use batch_axis instead",
                             "batch_dim")
def reverse_sequence(input,
                     seq_lengths,
                     seq_axis=None,
                     batch_axis=None,
                     name=None,
                     seq_dim=None,
                     batch_dim=None):
  """Reverses variable-length slices (v1 API with deprecated *_dim aliases)."""
  # Fold the deprecated *_dim aliases into the canonical *_axis arguments.
  resolved_seq_axis = deprecation.deprecated_argument_lookup(
      "seq_axis", seq_axis, "seq_dim", seq_dim)
  resolved_batch_axis = deprecation.deprecated_argument_lookup(
      "batch_axis", batch_axis, "batch_dim", batch_dim)
  return gen_array_ops.reverse_sequence(
      input=input,
      seq_lengths=seq_lengths,
      seq_dim=resolved_seq_axis,
      batch_dim=resolved_batch_axis,
      name=name)
@tf_export("reverse_sequence", v1=[])
@dispatch.add_dispatch_support
def reverse_sequence_v2(input,
                        seq_lengths,
                        seq_axis=None,
                        batch_axis=None,
                        name=None):
  """Reverses variable-length slices along `seq_axis` within each batch."""
  op_kwargs = dict(
      input=input,
      seq_lengths=seq_lengths,
      seq_dim=seq_axis,
      batch_dim=batch_axis,
      name=name)
  return gen_array_ops.reverse_sequence(**op_kwargs)
# pylint: enable=redefined-builtin
@tf_export(v1=["gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             ("The `validate_indices` argument has no effect. "
                              "Indices are always validated on CPU and never "
                              "validated on GPU."),
                             ("validate_indices", None))
def gather(params,
           indices,
           validate_indices=None,
           name=None,
           axis=None,
           batch_dims=0):  # pylint: disable=g-doc-args
  """Gathers slices from `params` along `axis` according to `indices`.

  Args:
    params: The tensor (or resource-variable-like object) to gather from.
    indices: An integer index tensor.
    validate_indices: Deprecated; has no effect (see decorator above).
    name: A name for the operation (optional).
    axis: The axis in `params` to gather from; defaults to `batch_dims`.
    batch_dims: Number of leading batch dimensions.

  Returns:
    A `Tensor` with the same type as `params`.
  """
  del validate_indices
  if axis is None:
    axis = batch_dims
  if tensor_util.constant_value(axis) != 0:
    return gen_array_ops.gather_v2(
        params, indices, axis, batch_dims=batch_dims, name=name)
  try:
    # TODO(apassos) find a less bad way of detecting resource variables
    # without introducing a circular dependency.
    return params.sparse_read(indices, name=name)
  except AttributeError:
    # `params` has no `sparse_read`: it is an ordinary tensor.
    return gen_array_ops.gather_v2(params, indices, axis, name=name)
@tf_export("gather", v1=[])
@dispatch.add_dispatch_support
def gather_v2(params,
              indices,
              validate_indices=None,
              axis=None,
              batch_dims=0,
              name=None):
  """TF-2 endpoint for `gather`; reorders arguments and delegates."""
  return gather(
      params=params,
      indices=indices,
      validate_indices=validate_indices,
      axis=axis,
      batch_dims=batch_dims,
      name=name)
gather_v2.__doc__ = gather.__doc__
@tf_export(v1=["batch_gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    "2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
    "with `batch_dims=-1` instead.")  # pylint: disable=missing-docstring
def batch_gather(params, indices, name=None):
  """Gathers slices within each leading batch dimension (deprecated)."""
  with ops.name_scope(name, "BatchGather", [params, indices]):
    indices = ops.convert_to_tensor(indices, name="indices")
    params = ops.convert_to_tensor(params, name="params")
    if indices.shape.ndims is None:
      raise ValueError(
          "batch_gather does not allow indices with unknown shape.")
    # All but the last dimension of `indices` are treated as batch dims.
    return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)
def _batch_gather(params, indices, batch_dims, axis=None):
  r"""Gathers slices from `params` according to `indices` with batch dims.

  Performs a `gather` within each of the leading `batch_dims` batch
  dimensions; with `batch_dims == 0` this reduces to a plain `gather`.

  Args:
    params: A `Tensor` from which to gather values.
    indices: An integer `Tensor` of indices; its rank must be statically
      known.
    batch_dims: An `int` (or None, meaning `rank(indices) - 1`): the number
      of leading batch dimensions shared by `params` and `indices`.
      Negative values are counted from the end of `indices`' rank.
    axis: Optional `int` or integer tensor: the axis of `params` to gather
      from. Must be >= `batch_dims`; defaults to `batch_dims` (the first
      non-batch dimension). Supports negative values.

  Returns:
    A `Tensor` with the same type as `params`.

  Raises:
    TypeError: if `batch_dims` is not an int.
    ValueError: if `batch_dims` or `axis` is out of range, or `indices`
      has unknown rank.
  """
  if batch_dims is not None and not isinstance(batch_dims, int):
    raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
  indices = ops.convert_to_tensor(indices, name="indices")
  params = ops.convert_to_tensor(params, name="params")
  indices_ndims = indices.shape.ndims
  if indices_ndims is None:
    raise ValueError("tf.gather does not allow indices with unknown "
                     "rank when batch_dims is specified.")
  if batch_dims is None:
    batch_dims = indices_ndims - 1
  if batch_dims < 0:
    batch_dims += indices_ndims
  if batch_dims < 0 or batch_dims >= indices_ndims:
    raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
                     (batch_dims, indices_ndims))
  if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
    raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
                     (batch_dims, params.shape.ndims))
  # Handle axis by transposing the axis dimension to be the first non-batch
  # dimension, recursively calling batch_gather with axis=0, and then
  # transposing the result to put the pre-axis dimensions before the indices
  # dimensions.
  if axis is not None and axis != batch_dims:
    # Adjust axis to be positive.
    if not isinstance(axis, int):
      # BUGFIX: the original referenced `tf.where` and `array_ops.rank`,
      # neither of which is defined in this module (this *is* array_ops);
      # use the module-local `where` and `rank` instead.
      axis = where(axis < 0, axis + rank(params), axis)
    elif axis < 0 and params.shape.ndims is None:
      axis = axis + rank(params)
    else:
      if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
        raise ValueError("axis (%d) out of range [%d, %d)" %
                         (axis, -params.shape.ndims, params.shape.ndims))
      if axis < 0:
        axis += params.shape.ndims
      if axis < batch_dims:
        raise ValueError("batch_dims = %d must be less than or equal to "
                         "axis = %d" % (batch_dims, axis))
    # Move params[axis] up to params[batch_dims].
    perm = [
        list(range(batch_dims)), [axis],
        gen_math_ops._range(batch_dims, axis, 1),
        gen_math_ops._range(axis + 1, rank(params), 1)
    ]
    params = transpose(params, concat(perm, axis=0))
    result = _batch_gather(params, indices, batch_dims=batch_dims)
    # Move the result dimensions corresponding to params[batch_dims:axis]
    # to just before the dimensions corresponding to indices[batch_dims:].
    params_start = indices_ndims + axis - batch_dims
    perm = [
        list(range(batch_dims)),
        gen_math_ops._range(indices_ndims, params_start, 1),
        list(range(batch_dims, indices_ndims)),
        gen_math_ops._range(params_start, rank(result), 1)
    ]
    return transpose(result, perm=concat(perm, axis=0))
  indices_shape = shape(indices)
  params_shape = shape(params)
  batch_indices = indices
  indices_dtype = indices.dtype.base_dtype
  accum_dim_value = ones((), dtype=indices_dtype)
  # Use correct type for offset index computation
  casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
  # Fold the batch coordinates into flat offsets, innermost batch dim first.
  for dim in range(batch_dims, 0, -1):
    dim_value = casted_params_shape[dim - 1]
    accum_dim_value *= casted_params_shape[dim]
    start = zeros((), dtype=indices_dtype)
    step = ones((), dtype=indices_dtype)
    dim_indices = gen_math_ops._range(start, dim_value, step)
    dim_indices *= accum_dim_value
    dim_shape = stack(
        [1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
    batch_indices += reshape(dim_indices, dim_shape)
  flat_indices = reshape(batch_indices, [-1])
  outer_shape = params_shape[batch_dims + 1:]
  flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
                                       False)
  flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
                                       axis=0))
  flat_result = gather(flat_params, flat_indices)
  result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
  # Recover as much static shape information as the inputs allow.
  final_shape = indices.get_shape()[:batch_dims].merge_with(
      params.get_shape()[:batch_dims])
  final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
  final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
  result.set_shape(final_shape)
  return result
@tf_export(v1=["gather_nd", "manip.gather_nd"])
@dispatch.add_dispatch_support
@deprecated_endpoints("manip.gather_nd")
def gather_nd(params, indices, name=None, batch_dims=0):
  """Gathers slices from `params` into a tensor shaped by `indices`.

  Args:
    params: The tensor (or resource-variable-like object) to gather from.
    indices: An integer index tensor; the last dimension indexes into
      `params`.
    name: A name for the operation (optional).
    batch_dims: Number of leading batch dimensions (int or static tensor).

  Returns:
    A `Tensor` with the same type as `params`.
  """
  batch_dims_ = tensor_util.constant_value(batch_dims)
  if batch_dims_ is not None:
    batch_dims = int(batch_dims_)
  if batch_dims == 0:
    try:
      # TODO(apassos) find a less bad way of detecting resource variables
      # without introducing a circular dependency.
      return params.gather_nd(indices, name=name)
    except AttributeError:
      return gen_array_ops.gather_nd(params, indices, name=name)
  else:
    return batch_gather_nd(params, indices, batch_dims=batch_dims, name=name)
@tf_export("gather_nd", v1=[])
@dispatch.add_dispatch_support
def gather_nd_v2(params, indices, batch_dims=0, name=None):
  """TF-2 endpoint for `gather_nd`; delegates to the v1 implementation."""
  return gather_nd(
      params=params, indices=indices, name=name, batch_dims=batch_dims)
gather_nd_v2.__doc__ = gather_nd.__doc__
def batch_gather_nd(params, indices, batch_dims, name=None):
  """`gather_nd` implementation with leading batch dimensions.

  Args:
    params: A `Tensor` of shape [b1, ..., bM, g1, ..., gN] to gather from.
    indices: An integer `Tensor` of shape [b1, ..., bM, i1, ..., iK, C]
      with C <= N.
    batch_dims: Non-negative `int`, the number of leading batch dimensions
      (M above). Must be less than the ranks of both `params` and `indices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `params`.

  Raises:
    TypeError: if `batch_dims` is not an int.
    ValueError: if `batch_dims` is negative or out of range.
  """
  with ops.name_scope(name, "BatchGatherND", [params, indices]):
    indices = ops.convert_to_tensor(indices, name="indices")
    params = ops.convert_to_tensor(params, name="params")
    if not isinstance(batch_dims, int):
      raise TypeError("batch_dims must be an int; got %r" % (batch_dims,))
    if batch_dims < 0:
      raise ValueError("tf.gather_nd does not allow negative batch_dims.")
    params_ndims = params.shape.ndims
    indices_ndims = indices.shape.ndims
    if indices_ndims is not None and batch_dims >= indices_ndims:
      raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
                       (batch_dims, indices_ndims))
    if params_ndims is not None and batch_dims >= params_ndims:
      raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
                       (batch_dims, params_ndims))
    expand = batch_dims == 0
    if expand:
      # Normally gather_nd will be called when batch_dims == 0.
      # But if this function is called with batch_dims = 0, e.g. for testing
      # purposes, this adds a dummy batch dimension to make batch_dims = 1.
      params = expand_dims(params, axis=0)
      indices = expand_dims(indices, axis=0)
      batch_dims = 1
    params_shape = shape(params)
    indices_shape = shape(indices)
    batch_shape = params_shape[:batch_dims]
    batch_size = gen_math_ops.prod(batch_shape, [0])
    index_internal_ndims = rank(indices) - batch_dims - 1
    indices_internal_shape = indices_shape[batch_dims:-1]
    # Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'
    # with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify
    # 'indices' s.t. it has shape [i1, ..., iK, D], where D <= M + N and slices
    # to the entire 'params' tensor.
    # Assuming we have a batch of shape [B1, B2], we use meshgrid to create a
    # grid of size B1 x B2.
    batch_dim_list = unstack(batch_shape, axis=0)
    dim_ranges = [
        gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)
        for x in batch_dim_list
    ]
    mesh_list = meshgrid(*dim_ranges, indexing="ij") if dim_ranges else []
    # Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.
    flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]
    index_grid = transpose(stack(flat_list, axis=0))
    # We need to concatenate these batch coordinates with the internal indices.
    # concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]
    # So we reshape them both to [(B1.B2), i1, ..., iK, *]
    index_grid_shape = shape(index_grid)
    index_grid = reshape(
        index_grid,
        concat([
            index_grid_shape[:1],
            ones(index_internal_ndims, dtype=dtypes.int32), index_grid_shape[1:]
        ],
               axis=0))
    tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)
    index_grid = tile(index_grid, multiples=tile_shape)
    # index_grid now has shape [(B1.B2), i1, ..., iK, 2]
    flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)
    flat_indices = reshape(indices, shape=flat_shape)
    # flat_indices now has shape [(B1.B2), i1, ..., iK, C]
    indices = concat((index_grid, flat_indices), axis=-1)
    # indices has shape [(B1.B2), i1, ..., iK, 2+C]
    out = gen_array_ops.gather_nd(params, indices)
    # out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to
    # its original form.
    out_shape = shape(out)
    out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))
    if expand:
      # Drop the dummy batch dimension added above.
      out = squeeze(out, axis=0)
  return out
@deprecation.deprecated_endpoints("tensor_scatter_update")
@tf_export(
    "tensor_scatter_nd_update",
    v1=["tensor_scatter_nd_update", "tensor_scatter_update"])
@dispatch.add_dispatch_support
def tensor_scatter_nd_update(tensor, indices, updates, name=None):
  """Scatters `updates` into a copy of `tensor` at positions `indices`."""
  return gen_array_ops.tensor_scatter_update(
      tensor=tensor, indices=indices, updates=updates, name=name)
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
# (And also now because of 'axis' processing).
@tf_export(v1=["quantize_v2"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
    "2017-10-25",
    "`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
    "instead.")  # pylint: disable=missing-docstring
def quantize_v2(
    input,  # pylint: disable=redefined-builtin
    min_range,
    max_range,
    T,
    mode="MIN_COMBINED",
    name=None,
    round_mode="HALF_AWAY_FROM_ZERO",
    narrow_range=False,
    axis=None,
    ensure_minimum_range=0.01):
  """Quantizes `input` to type `T` (deprecated v1 endpoint)."""
  if axis is None:
    axis = -1
  elif axis < 0:
    # A negative axis is only resolvable when the rank is statically known.
    if input.shape.ndims is None:
      raise ValueError("input should have known rank to use negative axis.")
    axis %= input.shape.ndims
  # NOTE(review): `ensure_minimum_range` is only forwarded when it differs
  # from the default -- presumably so graphs without the newer attribute stay
  # serializable; confirm against the op registration.
  if ensure_minimum_range != 0.01:
    return gen_array_ops.quantize_v2(
        input,
        min_range,
        max_range,
        T=T,
        mode=mode,
        name=name,
        round_mode=round_mode,
        narrow_range=narrow_range,
        axis=axis,
        ensure_minimum_range=ensure_minimum_range)
  return gen_array_ops.quantize_v2(
      input,
      min_range,
      max_range,
      T=T,
      mode=mode,
      name=name,
      round_mode=round_mode,
      narrow_range=narrow_range,
      axis=axis)
quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""
# We want to expose tf.quantization.quantize instead of the top-level
# tf.quantize endpoint; the latter is deprecated (see the decorator below)
# and can be removed in a future version of TensorFlow.
@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("quantize")
def quantize(
    input,  # pylint: disable=redefined-builtin
    min_range,
    max_range,
    T,
    mode="MIN_COMBINED",
    round_mode="HALF_AWAY_FROM_ZERO",
    name=None,
    narrow_range=False,
    axis=None,
    ensure_minimum_range=0.01):
  """Quantizes `input` to type `T`; thin wrapper over `quantize_v2`."""
  # NOTE(review): `ensure_minimum_range` is only forwarded when it differs
  # from the default, mirroring the two-call pattern in `quantize_v2` --
  # presumably for graph compatibility; confirm against the op registration.
  if ensure_minimum_range != 0.01:
    return quantize_v2(
        input,
        min_range,
        max_range,
        T,
        mode=mode,
        round_mode=round_mode,
        name=name,
        narrow_range=narrow_range,
        axis=axis,
        ensure_minimum_range=ensure_minimum_range)
  return quantize_v2(
      input,
      min_range,
      max_range,
      T,
      mode=mode,
      round_mode=round_mode,
      name=name,
      narrow_range=narrow_range,
      axis=axis)
@tf_export("quantization.dequantize", v1=["quantization.dequantize",
                                          "dequantize"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("dequantize")
def dequantize(  # pylint: disable=missing-docstring
    input,  # pylint: disable=redefined-builtin
    min_range,
    max_range,
    mode="MIN_COMBINED",
    name=None,
    axis=None,
    narrow_range=False,
    dtype=dtypes.float32):
  """Dequantizes `input` into a float tensor."""
  if axis is None:
    axis = -1
  elif axis < 0:
    # A negative axis is only resolvable when the rank is statically known.
    if input.shape.ndims is None:
      raise ValueError("input should have known rank to use negative axis.")
    axis %= input.shape.ndims
  # NOTE(review): `axis`/`narrow_range` are only forwarded when non-default,
  # mirroring the pattern in `quantize_v2` -- presumably for graph
  # compatibility with older op definitions; confirm.
  if axis >= 0 or narrow_range:
    return gen_array_ops.dequantize(
        input,
        min_range,
        max_range,
        mode=mode,
        name=name,
        narrow_range=narrow_range,
        axis=axis,
        dtype=dtype)
  return gen_array_ops.dequantize(
      input, min_range, max_range, mode=mode, name=name, dtype=dtype)
dequantize.__doc__ = gen_array_ops.dequantize.__doc__
@tf_export("quantization.quantize_and_dequantize")
@dispatch.add_dispatch_support
@deprecation.deprecated(
    None,
    # Fixed the garbled deprecation message: missing space after "use",
    # duplicated "To", and "the V1 the behavior".
    "This Op has been deprecated, use `quantize_and_dequantize_v2` instead. "
    "To simulate the V1 behavior of "
    "tf.quantization.quantize_and_dequantize(...) use "
    "tf.grad_pass_through(tf.quantization.quantize_and_dequantize_v2)(...).")
def quantize_and_dequantize(
    input,  # pylint: disable=redefined-builtin
    input_min,
    input_max,
    signed_input=True,
    num_bits=8,
    range_given=False,
    round_mode="HALF_TO_EVEN",
    name=None,
    narrow_range=False,
    axis=None):
  """Quantizes then dequantizes a tensor (deprecated).

  Args:
    input: tensor to fake-quantize.
    input_min: minimum of the quantization range (used if `range_given`).
    input_max: maximum of the quantization range (used if `range_given`).
    signed_input: whether the quantized representation is signed.
    num_bits: bit width of the quantization.
    range_given: whether `input_min`/`input_max` are provided by the caller.
    round_mode: rounding mode passed to the kernel.
    name: optional op name.
    narrow_range: whether to use the narrow quantization range.
    axis: optional axis for per-channel quantization; may be negative only
      if the rank of `input` is statically known.

  Returns:
    The fake-quantized tensor.
  """
  # Normalize `axis`: -1 means "no axis"; negative axes need a known rank.
  if axis is None:
    axis = -1
  elif axis < 0:
    if input.shape.ndims is None:
      raise ValueError("input should have known rank to use negative axis.")
    axis %= input.shape.ndims

  return gen_array_ops.quantize_and_dequantize_v2(
      input,
      input_min=input_min,
      input_max=input_max,
      signed_input=signed_input,
      num_bits=num_bits,
      range_given=range_given,
      round_mode=round_mode,
      narrow_range=narrow_range,
      axis=axis,
      name=name)
@tf_export("quantization.quantize_and_dequantize_v2")
@dispatch.add_dispatch_support
def quantize_and_dequantize_v2(
    input,  # pylint: disable=redefined-builtin
    input_min,
    input_max,
    signed_input=True,
    num_bits=8,
    range_given=False,
    round_mode="HALF_TO_EVEN",
    name=None,
    narrow_range=False,
    axis=None):
  # Resolve `axis` to the kernel's convention (-1 == no axis); negative
  # values require a statically known rank.
  if axis is None:
    axis = -1
  else:
    if axis < 0:
      ndims = input.shape.ndims
      if ndims is None:
        raise ValueError("input should have known rank to use negative axis.")
      axis %= ndims
  # Dispatches to the V4 kernel (which has a well-defined gradient).
  return gen_array_ops.quantize_and_dequantize_v4(
      input,
      input_min=input_min,
      input_max=input_max,
      signed_input=signed_input,
      num_bits=num_bits,
      range_given=range_given,
      round_mode=round_mode,
      narrow_range=narrow_range,
      axis=axis,
      name=name)
@tf_export("searchsorted")
@dispatch.add_dispatch_support
def searchsorted(sorted_sequence,
                 values,
                 side="left",
                 out_type=dtypes.int32,
                 name=None):
  """Finds insertion indices of `values` into `sorted_sequence`.

  Both inputs are flattened to 2-D (batch x last-axis) before calling the
  lower/upper bound kernels, then the result is reshaped back to the shape
  of `values`.
  """
  seq_len = shape_internal(sorted_sequence)[-1]
  val_len = shape_internal(values)[-1]
  seq_2d = reshape(sorted_sequence, [-1, seq_len])
  val_2d = reshape(values, [-1, val_len])
  if side == "right":
    flat_out = gen_array_ops.upper_bound(seq_2d, val_2d, out_type,
                                         name)
  elif side == "left":
    flat_out = gen_array_ops.lower_bound(seq_2d, val_2d, out_type,
                                         name)
  else:
    raise ValueError("side must be either 'right' or 'left'. Saw: %s." % side)
  return reshape(flat_out, shape_internal(values))
# The public `quantize` wrapper inherits the generated QuantizeV2 op's docs.
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
@tf_export("image.extract_patches")
@dispatch.add_dispatch_support
def extract_image_patches_v2(images, sizes, strides, rates, padding, name=None):
  """Extracts image patches; thin wrapper over the generated op."""
  return gen_array_ops.extract_image_patches(images, sizes, strides, rates,
                                             padding, name)
@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "ksizes is deprecated, use sizes instead",
                             "ksizes")
def extract_image_patches(  # pylint: disable=missing-docstring
    images,
    ksizes=None,
    strides=None,
    rates=None,
    padding=None,
    name=None,
    sizes=None):
  # Resolve the deprecated `ksizes` alias: prefer `sizes` when supplied.
  ksizes = deprecation.deprecated_argument_lookup("sizes", sizes, "ksizes",
                                                  ksizes)
  return gen_array_ops.extract_image_patches(images, ksizes, strides, rates,
                                             padding, name)
# Reuse the generated op's docstring for this V1 wrapper.
extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
@tf_export("fingerprint")
@dispatch.add_dispatch_support
def fingerprint(data, method="farmhash64", name=None):
  """Thin wrapper over the generated Fingerprint op."""
  return gen_array_ops.fingerprint(data, method, name)
def convert_to_int_tensor(tensor, name, dtype=dtypes.int32):
  """Converts `tensor` to an integer tensor of `dtype`.

  Raises:
    TypeError: if the converted tensor does not have an integer dtype.
  """
  tensor = ops.convert_to_tensor(tensor, name=name, preferred_dtype=dtype)
  # Guard clause: reject non-integer inputs before casting.
  if not tensor.dtype.is_integer:
    raise TypeError("%s must be an integer tensor; dtype=%s" %
                    (name, tensor.dtype))
  return gen_math_ops.cast(tensor, dtype)
def get_positive_axis(axis, ndims, axis_name="axis", ndims_name="ndims"):
  """Validates `axis` and converts it to a non-negative index.

  Args:
    axis: int axis, possibly negative.
    ndims: statically known number of dimensions, or None.
    axis_name: name used in error messages for `axis`.
    ndims_name: name used in error messages for `ndims`.

  Returns:
    `axis` resolved to the range [0, ndims).

  Raises:
    TypeError: if `axis` is not an int.
    ValueError: if `axis` is out of bounds, or negative while `ndims` is
      unknown.
  """
  if not isinstance(axis, int):
    raise TypeError("%s must be an int; got %s" %
                    (axis_name, type(axis).__name__))
  if ndims is None:
    # Without a known rank only non-negative axes can be accepted.
    if axis < 0:
      raise ValueError("%s may only be negative if %s is statically known." %
                       (axis_name, ndims_name))
    return axis
  if -ndims <= axis < ndims:
    return axis + ndims if axis < 0 else axis
  raise ValueError("%s=%s out of bounds: expected %s<=%s<%s" %
                   (axis_name, axis, -ndims, axis_name, ndims))
# This op is intended to exactly match the semantics of numpy.repeat, with
# one exception: numpy.repeat has special (and somewhat non-intuitive) behavior
# when axis is not specified. Rather than implement that special behavior, we
# simply make `axis` be a required argument.
#
# External (OSS) `tf.repeat` feature request:
# https://github.com/tensorflow/tensorflow/issues/8246
def repeat_with_axis(data, repeats, axis, name=None):
  """Repeats elements of `data` along `axis` (numpy.repeat with required axis).

  Args:
    data: tensor whose slices along `axis` are repeated.
    repeats: scalar, or rank-1 integer tensor of per-slice repeat counts.
    axis: int axis along which to repeat; may be negative only if the rank
      of `data` is statically known.
    name: optional op name scope.

  Returns:
    A tensor of the same rank as `data` whose `axis` dimension equals the
    sum of the repeat counts.
  """
  if not isinstance(axis, int):
    raise TypeError("axis must be an int; got %s" % type(axis).__name__)
  with ops.name_scope(name, "Repeat", [data, repeats]):
    data = ops.convert_to_tensor(data, name="data")
    repeats = convert_to_int_tensor(repeats, name="repeats")
    repeats.shape.with_rank_at_most(1)
    # If `data` is a scalar, then upgrade it to a vector.
    data = _with_nonzero_rank(data)
    data_shape = shape(data)
    # If `axis` is negative, then convert it to a positive value.
    axis = get_positive_axis(axis, data.shape.rank, ndims_name="rank(data)")
    # If we know that `repeats` is a scalar, then we can just tile & reshape.
    if repeats.shape.num_elements() == 1:
      repeats = reshape(repeats, [])
      expanded = expand_dims(data, axis + 1)
      tiled = tile_one_dimension(expanded, axis + 1, repeats)
      result_shape = concat([
          data_shape[:axis], [repeats * data_shape[axis]], data_shape[axis + 1:]
      ],
                            axis=0)
      return reshape(tiled, result_shape)
    # Check data Tensor shapes.
    if repeats.shape.ndims == 1:
      data.shape.dims[axis].assert_is_compatible_with(repeats.shape[0])
    # One repeat count per slice along `axis`.
    repeats = broadcast_to(repeats, [data_shape[axis]])
    # Keep the per-slice counts around: needed below to compute the output
    # size of the repeated dimension.
    repeats_original = repeats
    # Broadcast the `repeats` tensor so rank(repeats) == axis + 1.
    if repeats.shape.ndims != axis + 1:
      repeats_shape = shape(repeats)
      repeats_ndims = rank(repeats)
      broadcast_shape = concat(
          [data_shape[:axis + 1 - repeats_ndims], repeats_shape], axis=0)
      repeats = broadcast_to(repeats, broadcast_shape)
    repeats.set_shape([None] * (axis + 1))
    # Create a "sequence mask" based on `repeats`, where slices across `axis`
    # contain one `True` value for each repetition.  E.g., if
    # `repeats = [3, 1, 2]`, then `mask = [[1, 1, 1], [1, 0, 0], [1, 1, 0]]`.
    max_repeat = gen_math_ops.maximum(
        0, gen_math_ops._max(repeats, _all_dimensions(repeats)))
    mask = sequence_mask(repeats, max_repeat)
    # Add a new dimension around each value that needs to be repeated, and
    # then tile that new dimension to match the maximum number of repetitions.
    expanded = expand_dims(data, axis + 1)
    tiled = tile_one_dimension(expanded, axis + 1, max_repeat)
    # Use `boolean_mask` to discard the extra repeated values.  This also
    # flattens all dimensions up through `axis`.
    masked = boolean_mask(tiled, mask)
    # Reshape the output tensor to add the outer dimensions back.
    if axis == 0:
      result = masked
    else:
      repeated_dim_size = gen_math_ops._sum(
          repeats_original,
          axis=gen_math_ops._range(0, rank(repeats_original), 1))
      result_shape = concat(
          [data_shape[:axis], [repeated_dim_size], data_shape[axis + 1:]],
          axis=0)
      result = reshape(masked, result_shape)
    # Preserve shape information.
    if data.shape.ndims is not None:
      new_axis_size = 0 if repeats.shape[0] == 0 else None
      result.set_shape(data.shape[:axis].concatenate(
          [new_axis_size]).concatenate(data.shape[axis + 1:]))
    return result
def tile_one_dimension(data, axis, multiple):
  """Tiles a single (nonnegative) `axis` of `data` by `multiple`."""
  if data.shape.ndims is None:
    # Rank unknown at graph-build time: assemble the multiples vector
    # dynamically from ones, replacing position `axis`.
    ones_vec = ones(rank(data), dtypes.int32)
    multiples = concat(
        [ones_vec[:axis], [multiple], ones_vec[axis + 1:]], axis=0)
  else:
    multiples = [1] * data.shape.ndims
    multiples[axis] = multiple
  return tile(data, multiples)
def _with_nonzero_rank(data):
  """Returns `data`, promoting a rank-0 tensor to rank 1."""
  ndims = data.shape.ndims
  if ndims is None:
    # Rank unknown statically: prepend a 1 to the dynamic shape and keep
    # only the trailing `rank` entries, so scalars become shape [1] and
    # everything else keeps its shape.
    dyn_shape = shape(data)
    dyn_rank = rank(data)
    return reshape(data, concat([[1], dyn_shape], axis=0)[-dyn_rank:])
  return stack([data]) if ndims == 0 else data
@tf_export("repeat")
@dispatch.add_dispatch_support
def repeat(input, repeats, axis=None, name=None):  # pylint: disable=redefined-builtin
  """Repeats elements of `input`; flattens first when `axis` is None."""
  if axis is not None:
    return repeat_with_axis(input, repeats, axis, name)
  # No axis given: flatten, then repeat along the single remaining axis.
  return repeat_with_axis(reshape(input, [-1]), repeats, 0, name)
@tf_export("guarantee_const")
@deprecation.deprecated(None, "Not for public use.")
def guarantee_const(input, name=None):  # pylint: disable=redefined-builtin
  """Thin wrapper over the generated GuaranteeConst op (deprecated)."""
  return gen_array_ops.guarantee_const(input=input, name=name)
# Register elementwise ops that don't have Python wrappers, so the dispatch
# machinery still covers them.
dispatch.register_unary_elementwise_api(gen_array_ops.check_numerics)
| true | true |
1c39835f2d752bd68ffb9c9f399f30231b3bf30e | 8,399 | py | Python | client_state_machine.py | hypedguy/Chat-System | 445e8967133ec9affd31cf3a0178d949be85347d | [
"MIT"
] | null | null | null | client_state_machine.py | hypedguy/Chat-System | 445e8967133ec9affd31cf3a0178d949be85347d | [
"MIT"
] | null | null | null | client_state_machine.py | hypedguy/Chat-System | 445e8967133ec9affd31cf3a0178d949be85347d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 00:00:32 2015
@author: zhengzhang
"""
from chat_utils import *
from configuredraycaster import dorender
import encoder
import json
import hashlib
from PIL import Image
class DummyThread:
    """Placeholder standing in for a render worker thread.

    Mimics the one piece of the `threading.Thread` interface the client
    checks (`is_alive`), always reporting that no work is running.
    """
    def is_alive(self):
        """Report that this placeholder is never running."""
        return False
def dostuff(socket, config, num, start, end):
    """Render rows [start, end) of the configured scene and send the result.

    Args:
        socket: connected socket used with chat_utils' mysend().
        config: render configuration (JSON string) passed to dorender.
        num: index of this part, echoed back so the server can reassemble
            parts in order.
        start, end: row range this client is responsible for.
    """
    comment, result = dorender(config, start=start, end=end)
    print(result)
    print('render result: %s' % comment)
    # Bug fix: format only the part index. The original applied "% num" to
    # the whole concatenation including the encrypted payload, which would
    # raise if the payload ever contained a '%' character.
    mysend(socket, M_RESULT + ("%d$$" % num) + encoder.encrypt(result))
    print("<Client: Done render.>")
def stitch(imgparts):
    """Vertically concatenate equally sized image parts into one image.

    Args:
        imgparts: list of PIL images; part i is pasted at vertical offset
            i * part_height (dimensions are taken from the first part).

    Returns:
        A new RGB PIL image with all parts stacked top to bottom.
    """
    part_w, part_h = imgparts[0].size
    canvas = Image.new('RGB', (part_w, part_h * len(imgparts)))
    for idx, part in enumerate(imgparts):
        canvas.paste(part, (0, idx * part_h))
    return canvas
def hashed(config):
w,h = config['camera']['imageDim']
string = str(config)
hashobj = hashlib.sha1(string.encode())
return '%dx%d_' % (w,h) + hashobj.hexdigest()
class ClientSM:
    """Client-side chat/render state machine.

    Tracks the session state (S_OFFLINE, S_LOGGEDIN, S_CHATTING). On each
    call to proc(), it consumes the locally typed input plus any incoming
    server/peer message, performs the matching protocol exchange over the
    socket, and accumulates user-facing text in `out_msg`.
    """
    def __init__(self, s):
        # s: connected socket, used with chat_utils' mysend()/myrecv().
        self.state = S_OFFLINE
        self.peer = ''  # name of the currently connected peer ('' if none)
        self.me = ''  # this client's user name
        self.out_msg = ''  # text accumulated for display to the user
        # Render worker; DummyThread stands in until a real thread starts.
        self.thread = DummyThread()
        self.s = s
        self.prevconfig = ""  # parsed config of the last render (used by hashed())
    def set_state(self, state):
        """Set the session state (one of the S_* constants)."""
        self.state = state
    def get_state(self):
        """Return the current session state."""
        return self.state
    def set_myname(self, name):
        """Record this client's user name."""
        self.me = name
    def get_myname(self):
        """Return this client's user name."""
        return self.me
    def connect_to(self, peer):
        """Ask the server for a chat connection to `peer`.

        Returns True on success (recording the peer); otherwise appends a
        human-readable failure reason to `out_msg` and returns False.
        """
        msg = M_CONNECT + peer
        mysend(self.s, msg)
        response = myrecv(self.s)
        if response == (M_CONNECT+'ok'):
            self.peer = peer
            self.out_msg += 'You are connected with '+ self.peer + '\n'
            return (True)
        elif response == (M_CONNECT + 'busy'):
            self.out_msg += 'User is busy. Please try again later\n'
        elif response == (M_CONNECT + 'hey you'):
            self.out_msg += 'Cannot talk to yourself (sick)\n'
        else:
            self.out_msg += 'User is not online, try again later\n'
        return(False)
    def disconnect(self):
        """Tell the server to drop the current chat and clear the peer."""
        msg = M_DISCONNECT
        mysend(self.s, msg)
        self.out_msg += 'You are disconnected from ' + self.peer + '\n'
        self.peer = ''
    def proc(self, my_msg, peer_code, peer_msg):
        """Advance the state machine by one step.

        Args:
            my_msg: text typed by the local user ('' if none).
            peer_code: protocol code (M_* constant) of the incoming message.
            peer_msg: payload of the incoming server/peer message ('' if none).

        Returns:
            Text accumulated in `out_msg` for display to the local user.
        """
        self.out_msg = ''
#==============================================================================
# Once logged in, do a few things: get peer listing, connect, search
# And, of course, if you are so bored, just go
# This is event handling instate "S_LOGGEDIN"
#==============================================================================
        if self.state == S_LOGGEDIN:
            # todo: can't deal with multiple lines yet
            if len(my_msg) > 0:
                if my_msg == 'q':
                    self.out_msg += 'See you next time!\n'
                    self.state = S_OFFLINE
                elif my_msg == 'time':
                    mysend(self.s, M_TIME)
                    time_in = myrecv(self.s)
                    self.out_msg += time_in
                elif my_msg == 'who':
                    mysend(self.s, M_LIST)
                    logged_in = myrecv(self.s)
                    self.out_msg += 'Here are all the users in the system:\n'
                    self.out_msg += logged_in
                elif my_msg[0] == 'c':
                    # 'c <name>' -- request a chat with <name>
                    peer = my_msg[1:].strip()
                    if self.connect_to(peer) == True:
                        self.state = S_CHATTING
                        self.out_msg += 'Connect to ' + peer + '. Chat away!\n\n'
                        self.out_msg += '-----------------------------------\n'
                    else:
                        self.out_msg += 'Connection unsuccessful\n'
                elif my_msg[0] == '?':
                    # '? <term>' -- search chat history for <term>
                    term = my_msg[1:].strip()
                    mysend(self.s, M_SEARCH + term)
                    search_rslt = myrecv(self.s)[1:].strip()
                    if (len(search_rslt)) > 0:
                        self.out_msg += search_rslt + '\n\n'
                    else:
                        self.out_msg += '\'' + term + '\'' + ' not found\n\n'
                elif my_msg[0] == 'p':
                    # 'p <idx>' -- fetch sonnet number <idx>
                    poem_idx = my_msg[1:].strip()
                    mysend(self.s, M_POEM + poem_idx)
                    poem = myrecv(self.s)[1:].strip()
                    if (len(poem) > 0):
                        self.out_msg += poem + '\n\n'
                    else:
                        self.out_msg += 'Sonnet ' + poem_idx + ' not found\n\n'
                else:
                    self.out_msg += menu
            if len(peer_msg) > 0:
                # Another user requested a chat with us.
                if peer_code == M_CONNECT:
                    self.peer = peer_msg
                    self.out_msg += 'Request from ' + self.peer + '\n'
                    self.out_msg += 'You are connected with ' + self.peer
                    self.out_msg += '. Chat away!\n\n'
                    self.out_msg += '------------------------------------\n'
                    #mysend(self.s, "[s]" + "d" * 299939999)
                    self.state = S_CHATTING
#==============================================================================
# Start chatting, 'bye' for quit
# This is event handling instate "S_CHATTING"
#==============================================================================
        elif self.state == S_CHATTING:
            if len(my_msg) > 0:     # My stuff, going out
                if my_msg[:4] == 'load':
                    # 'load <path>' -- send a render config file to the server
                    try:
                        config = open(my_msg[5:]).read()
                        mysend(self.s, M_CONFIG + config)
                    except FileNotFoundError:
                        # NOTE(review): the message slices my_msg[1:], but the
                        # path opened above is my_msg[5:] -- confirm intent.
                        self.out_msg += "The file %s does not exist." % my_msg[1:]
                elif my_msg == 'start':
                    mysend(self.s, M_START)
                else:
                    mysend(self.s, M_EXCHANGE + "[" + self.me + "] " + my_msg)
                    if my_msg == 'bye':
                        self.disconnect()
                        self.state = S_LOGGEDIN
                        self.peer = ''
            if len(peer_msg) > 0:    # Peer's stuff, coming in
                # New peer joins
                if peer_code == M_CONNECT:
                    self.out_msg += "(" + peer_msg + " joined)\n"
                elif peer_code == M_CONFIG:
                    self.out_msg += "<Server: Recieved config data of length %d>" % len(peer_msg[1:])
                elif peer_code == M_START:
                    # Server asks this client to render rows [start, end) of
                    # part `num`; payload fields are '$$'-separated.
                    print("<Server: Starting render.>")
                    num, start, end, config = peer_msg.split("$$")
                    # prevconfig holds the parsed dict (used by hashed());
                    # `config` stays a JSON string for dostuff/dorender.
                    self.prevconfig = json.loads(config)
                    num = int(num)
                    start = int(start)
                    end = int(end)
                    # Only one render at a time.
                    if not self.thread.is_alive():
                        self.thread = threading.Thread(target=dostuff,
                                                       args=(self.s, config, num, start, end))
                        self.thread.start()
                elif peer_code == M_RESULT:
                    # first, restore parts
                    parts = json.loads(peer_msg)
                    imgparts = [encoder.decrypt(x) for x in parts]
                    # stitch together sections
                    final = stitch(imgparts)
                    self.out_msg += "Recieved results."
                    final.show()
                    final.save('out/' + hashed(self.prevconfig) + ".png")
                else:
                    self.out_msg += peer_msg
                # I got bumped out
                if peer_code == M_DISCONNECT:
                    self.state = S_LOGGEDIN
                # Display the menu again
                if self.state == S_LOGGEDIN:
                    self.out_msg += menu
#==============================================================================
#                                invalid state
#==============================================================================
        else:
            self.out_msg += 'How did you wind up here??\n'
            print_state(self.state)
        return self.out_msg
| 39.247664 | 101 | 0.434218 |
from chat_utils import *
from configuredraycaster import dorender
import encoder
import json
import hashlib
from PIL import Image
class DummyThread:
def is_alive(self):
return False
def dostuff(socket, config, num, start, end):
comment, result= dorender(config, start=start,end=end)
print (result)
print('render result: %s' % comment)
mysend(socket, (M_RESULT + "%d$$" + encoder.encrypt(result))%num)
print("<Client: Done render.>")
def stitch(imgparts):
width = imgparts[0].size[0]
heightper = imgparts[0].size[1]
height = heightper * len(imgparts)
final = Image.new('RGB', (width, height))
for i in range(len(imgparts)):
image = imgparts[i]
final.paste(image, (0, i*heightper))
return final
def hashed(config):
w,h = config['camera']['imageDim']
string = str(config)
hashobj = hashlib.sha1(string.encode())
return '%dx%d_' % (w,h) + hashobj.hexdigest()
class ClientSM:
def __init__(self, s):
self.state = S_OFFLINE
self.peer = ''
self.me = ''
self.out_msg = ''
self.thread = DummyThread()
self.s = s
self.prevconfig = ""
def set_state(self, state):
self.state = state
def get_state(self):
return self.state
def set_myname(self, name):
self.me = name
def get_myname(self):
return self.me
def connect_to(self, peer):
msg = M_CONNECT + peer
mysend(self.s, msg)
response = myrecv(self.s)
if response == (M_CONNECT+'ok'):
self.peer = peer
self.out_msg += 'You are connected with '+ self.peer + '\n'
return (True)
elif response == (M_CONNECT + 'busy'):
self.out_msg += 'User is busy. Please try again later\n'
elif response == (M_CONNECT + 'hey you'):
self.out_msg += 'Cannot talk to yourself (sick)\n'
else:
self.out_msg += 'User is not online, try again later\n'
return(False)
def disconnect(self):
msg = M_DISCONNECT
mysend(self.s, msg)
self.out_msg += 'You are disconnected from ' + self.peer + '\n'
self.peer = ''
def proc(self, my_msg, peer_code, peer_msg):
self.out_msg = ''
if self.state == S_LOGGEDIN:
if len(my_msg) > 0:
if my_msg == 'q':
self.out_msg += 'See you next time!\n'
self.state = S_OFFLINE
elif my_msg == 'time':
mysend(self.s, M_TIME)
time_in = myrecv(self.s)
self.out_msg += time_in
elif my_msg == 'who':
mysend(self.s, M_LIST)
logged_in = myrecv(self.s)
self.out_msg += 'Here are all the users in the system:\n'
self.out_msg += logged_in
elif my_msg[0] == 'c':
peer = my_msg[1:].strip()
if self.connect_to(peer) == True:
self.state = S_CHATTING
self.out_msg += 'Connect to ' + peer + '. Chat away!\n\n'
self.out_msg += '-----------------------------------\n'
else:
self.out_msg += 'Connection unsuccessful\n'
elif my_msg[0] == '?':
term = my_msg[1:].strip()
mysend(self.s, M_SEARCH + term)
search_rslt = myrecv(self.s)[1:].strip()
if (len(search_rslt)) > 0:
self.out_msg += search_rslt + '\n\n'
else:
self.out_msg += '\'' + term + '\'' + ' not found\n\n'
elif my_msg[0] == 'p':
poem_idx = my_msg[1:].strip()
mysend(self.s, M_POEM + poem_idx)
poem = myrecv(self.s)[1:].strip()
if (len(poem) > 0):
self.out_msg += poem + '\n\n'
else:
self.out_msg += 'Sonnet ' + poem_idx + ' not found\n\n'
else:
self.out_msg += menu
if len(peer_msg) > 0:
if peer_code == M_CONNECT:
self.peer = peer_msg
self.out_msg += 'Request from ' + self.peer + '\n'
self.out_msg += 'You are connected with ' + self.peer
self.out_msg += '. Chat away!\n\n'
self.out_msg += '------------------------------------\n'
#mysend(self.s, "[s]" + "d" * 299939999)
self.state = S_CHATTING
#==============================================================================
# Start chatting, 'bye' for quit
# This is event handling instate "S_CHATTING"
#==============================================================================
elif self.state == S_CHATTING:
if len(my_msg) > 0: # My stuff, going out
if my_msg[:4] == 'load':
try:
config = open(my_msg[5:]).read()
mysend(self.s, M_CONFIG + config)
except FileNotFoundError:
self.out_msg += "The file %s does not exist." % my_msg[1:]
elif my_msg == 'start':
mysend(self.s, M_START)
else:
mysend(self.s, M_EXCHANGE + "[" + self.me + "] " + my_msg)
if my_msg == 'bye':
self.disconnect()
self.state = S_LOGGEDIN
self.peer = ''
if len(peer_msg) > 0: # Peer's stuff, coming in
if peer_code == M_CONNECT:
self.out_msg += "(" + peer_msg + " joined)\n"
elif peer_code == M_CONFIG:
self.out_msg += "<Server: Recieved config data of length %d>" % len(peer_msg[1:])
elif peer_code == M_START:
print("<Server: Starting render.>")
num, start, end, config = peer_msg.split("$$")
self.prevconfig = json.loads(config)
num = int(num)
start = int(start)
end = int(end)
if not self.thread.is_alive():
self.thread = threading.Thread(target=dostuff,
args=(self.s, config, num, start, end))
self.thread.start()
elif peer_code == M_RESULT:
parts = json.loads(peer_msg)
imgparts = [encoder.decrypt(x) for x in parts]
final = stitch(imgparts)
self.out_msg += "Recieved results."
final.show()
final.save('out/' + hashed(self.prevconfig) + ".png")
else:
self.out_msg += peer_msg
if peer_code == M_DISCONNECT:
self.state = S_LOGGEDIN
if self.state == S_LOGGEDIN:
self.out_msg += menu
else:
self.out_msg += 'How did you wind up here??\n'
print_state(self.state)
return self.out_msg
| true | true |
1c398573bc12664f86422bc24fced85238c963a6 | 435 | py | Python | clean_db.json.py | usc-isi-i2/mydig-webservice | 9628f72fed9f33d0fe341c3d8c3324cb198aae74 | [
"MIT"
] | 2 | 2018-12-19T16:41:50.000Z | 2021-11-11T20:52:25.000Z | clean_db.json.py | research-software-company/mydig-webservice | 9628f72fed9f33d0fe341c3d8c3324cb198aae74 | [
"MIT"
] | 55 | 2017-06-09T15:53:56.000Z | 2018-04-16T23:53:30.000Z | clean_db.json.py | research-software-company/mydig-webservice | 9628f72fed9f33d0fe341c3d8c3324cb198aae74 | [
"MIT"
] | 12 | 2017-08-06T19:49:44.000Z | 2020-02-16T07:12:09.000Z | import json
from optparse import OptionParser
if __name__ == '__main__':
    # Usage: clean_db.json.py <input_db.json> <tld_to_remove>
    # Writes a copy of the input DB to ./db.json with one top-level key removed.
    parser = OptionParser()
    (c_options, args) = parser.parse_args()
    input_path = args[0]
    tld_to_remove = args[1]
    # Use context managers so both files are closed (and the output flushed)
    # even if an error occurs mid-way; the original left both handles open.
    with open(input_path, 'r') as db_file:
        db = json.load(db_file)
    out = dict()
    for k in db.keys():
        print(k, tld_to_remove)
        if k != tld_to_remove:
            out[k] = db[k]
    with open('db.json', 'w') as dbo:
        dbo.write(json.dumps(out))
from optparse import OptionParser
if __name__ == '__main__':
parser = OptionParser()
(c_options, args) = parser.parse_args()
input_path = args[0]
tld_to_remove = args[1]
db = json.load(open(input_path, 'r'))
dbo = open('db.json', 'w')
out = dict()
for k in db.keys():
print(k, tld_to_remove)
if k != tld_to_remove:
out[k] = db[k]
dbo.write(json.dumps(out)) | true | true |
1c39867a254f6ffc0d1528f327164f96587a0b71 | 9,697 | py | Python | EPImix_analysis/epimix_functions.py | frantisekvasa/epimix_rapid_processing | b4bc3cc8b2f473fd8f9538376b4ffbd6a5e9374f | [
"MIT"
] | null | null | null | EPImix_analysis/epimix_functions.py | frantisekvasa/epimix_rapid_processing | b4bc3cc8b2f473fd8f9538376b4ffbd6a5e9374f | [
"MIT"
] | null | null | null | EPImix_analysis/epimix_functions.py | frantisekvasa/epimix_rapid_processing | b4bc3cc8b2f473fd8f9538376b4ffbd6a5e9374f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 10:38:35 2020
@author: Frantisek Vasa (fdv247@gmail.com)
Additional functions for manuscript "Rapid processing and quantitative evaluation of multicontrast EPImix scans for adaptive multimodal imaging"
"""
# Additional functions to run EPImix comparison script
# for wbplot
from wbplot import pscalar
import numpy as np
from matplotlib import cm, lines
#from matplotlib import colors
#from matplotlib.colors import ListedColormap
# for nilearn masked plot
import nibabel as nb
import nilearn as nl
# colorbar
import matplotlib as mpl
import matplotlib.pyplot as plt
# for spin p-value
import scipy as sp
# formatting of p-values as powers of 10, modified from:
# https://stackoverflow.com/questions/25750170/show-decimal-places-and-scientific-notation-on-the-axis-of-a-matplotlib-plot/49330649#49330649
def pow_10_fmt(p):
    """Format a p-value for display, using powers of ten for small values.

    Values above 0.001 are shown rounded to three decimals; values at or
    below 0.001 are rendered in LaTeX scientific notation; values below
    1e-10 are reported as 'P < 10^-10'.
    """
    if p < 1e-10:
        return 'P < $10^{-10}$'
    if p > 0.001:
        return 'P = '+str(round(p,3))
    # Render in LaTeX scientific notation, e.g. 1.23x10^{-4}.
    mantissa, _, exp_part = ("%1.2e" % p).partition('e')
    mantissa = mantissa.rstrip('.')
    exp_sign = exp_part[0].replace('+', '')
    exp_digits = exp_part[1:].lstrip('0')
    if exp_digits:
        exp_digits = '10^{%s%s}' % (exp_sign, exp_digits)
    if mantissa and exp_digits:
        body = r'%s{\times}%s' % (mantissa, exp_digits)
    else:
        body = r'%s%s' % (mantissa, exp_digits)
    return "P = ${}$".format(body)
# plot of high-res (360-ROI) parcellation, excluding "dropped" regions
def pscalar_mmp_hk(file_out, pscalars_hk, mmp_hk, orientation='landscape',
                   hemi=None, vrange=None, cmap='magma', transp=False):
    """Plot values for a subset of the 360-ROI parcellation with wbplot.

    Regions not listed in `mmp_hk` ("dropped" regions) are shown in grey by
    assigning them an under-range value and colouring under-range grey.

    Args:
        file_out: output image path passed to wbplot's pscalar().
        pscalars_hk: values for the retained regions, ordered as `mmp_hk`.
        mmp_hk: indices (into the 360 ROIs) of retained regions.
        orientation: figure orientation forwarded to pscalar().
        hemi: hemisphere selection forwarded to pscalar().
        vrange: (min, max) colour range; defaults to the data range.
        cmap: matplotlib colormap name.
        transp: whether the output background is transparent.
    """
    # set vrange if it wasn't set before
    if vrange is None:
        vrange = (min(pscalars_hk), max(pscalars_hk))
    # Mark dropped regions with an under-range value (this parcellation has
    # 360 ROIs -> hardcoded) so set_under() renders them grey.
    pscalars = np.ones(360)*(vrange[0]-1); pscalars[mmp_hk] = pscalars_hk
    cmap_under = cm.get_cmap(cmap, 256)
    cmap_under.set_under('grey')
    # Bug fix: forward the caller's `orientation` instead of the previously
    # hard-coded 'landscape'.
    pscalar(file_out, pscalars, orientation=orientation,
            hemisphere=hemi, vrange=vrange, cmap=cmap_under, transparent=transp)
# plot of low-res (44-ROI) parcellation, excluding "dropped" regions
def pscalar_mmp_lk(file_out, pscalars_lk, mmp_lk, mmp_ds_ids, orientation='landscape',
                   hemi=None, vrange=None, cmap='magma', transp=False):
    """Plot values for a subset of the 44-ROI (downsampled) parcellation.

    Regions not listed in `mmp_lk` ("dropped" regions) are shown in grey by
    assigning them an under-range value and colouring under-range grey.

    Args:
        file_out: output image path passed to wbplot's pscalar().
        pscalars_lk: values for the retained regions, ordered as `mmp_lk`.
        mmp_lk: indices (into the 44 ROIs) of retained regions.
        mmp_ds_ids: index array reordering/expanding the 44 ROI values for
            pscalar() -- presumably maps low-res ROIs onto surface labels;
            confirm against the caller.
        orientation: figure orientation forwarded to pscalar().
        hemi: hemisphere selection forwarded to pscalar().
        vrange: (min, max) colour range; defaults to the data range.
        cmap: matplotlib colormap name.
        transp: whether the output background is transparent.
    """
    # set vrange if it wasn't set before
    if vrange is None:
        vrange = (min(pscalars_lk), max(pscalars_lk))
    # Mark dropped regions with an under-range value (this parcellation has
    # 44 ROIs -> hardcoded) so set_under() renders them grey.
    pscalars = np.ones(44)*(vrange[0]-1); pscalars[mmp_lk] = pscalars_lk
    cmap_under = cm.get_cmap(cmap, 256)
    cmap_under.set_under('grey')
    # Bug fixes: removed a redundant second `vrange is None` check, and
    # forward the caller's `orientation` instead of a hard-coded 'landscape'.
    pscalar(file_out, pscalars[mmp_ds_ids], orientation=orientation,
            hemisphere=hemi, vrange=vrange, cmap=cmap_under, transparent=transp)
# plot colorbar
def plot_cbar(c_lim, cmap_nm, c_label, lbs, save_path):
    """Render a standalone horizontal colorbar and save it to disk.

    Args:
        c_lim: (vmin, vmax) limits of the colorbar.
        cmap_nm: matplotlib colormap name.
        c_label: text label under the colorbar.
        lbs: label font size.
        save_path: output path; '.png' saves at 500 dpi, '.svg' as vector.
    """
    fig, ax = plt.subplots(figsize=(6, 0.75))
    fig.subplots_adjust(bottom=0.65)
    norm = mpl.colors.Normalize(vmin=c_lim[0], vmax=c_lim[1])
    cbar = mpl.colorbar.ColorbarBase(
        ax, cmap=cm.get_cmap(cmap_nm, 256), norm=norm, orientation='horizontal')
    cbar.set_label(c_label, size=lbs)
    ext = save_path[-3:]
    if ext == 'png':
        plt.savefig(save_path, dpi=500)
    elif ext == 'svg':
        plt.savefig(save_path)
# Median Absolute Deviation
def mad(a, axis=None):
    """Compute the Median Absolute Deviation of `a` along `axis`.

    NaNs are ignored (uses np.nanmedian throughout).
    """
    # Keep the reduced axis so the median broadcasts back against `a`.
    center = np.nanmedian(a, axis=axis, keepdims=True)
    return np.nanmedian(np.abs(a - center), axis=axis)
def kth_diag_indices(a, k):
    """Return (rows, cols) indices of the k-th diagonal of square array `a`.

    k=0 is the main diagonal, k>0 lies above it, k<0 below it.
    """
    rows, cols = np.diag_indices_from(a)
    if k == 0:
        return rows, cols
    if k > 0:
        return rows[:-k], cols[k:]
    return rows[-k:], cols[:k]
def plot_nl_image_masked(img_vec,mask_vec,img_shape,img_affine,cmap,clim=None,*line_args,**line_kwargs):
    """Plot a flattened image with nilearn, whitening voxels outside the mask.

    Args:
        img_vec: flattened image values.
        mask_vec: flattened binary mask (1 inside the brain).
        img_shape: 3-D shape to reshape `img_vec` to.
        img_affine: affine for the constructed NIfTI image.
        cmap: matplotlib colormap name.
        clim: (vmin, vmax); defaults to (mask minimum, 95th percentile).
        *line_args, **line_kwargs: forwarded to nilearn's plot_img().
    """
    inside = mask_vec == 1
    if clim is None:
        # Default colour range: in-mask minimum up to the 95th percentile.
        clim = (min(img_vec[inside]), np.percentile(img_vec[inside], 95))
    # Map out-of-mask voxels to an under-range value rendered white.
    masked = np.ones(img_vec.size) * (clim[0] - 1)
    masked[inside] = img_vec[inside]
    cmap_under = cm.get_cmap(cmap, 256)
    cmap_under.set_under('white')
    nii = nb.Nifti1Image(np.reshape(masked, img_shape), affine=img_affine)
    nl.plotting.plot_img(nii, colorbar=True, cmap=cmap_under,
                         vmin=clim[0], vmax=clim[1], *line_args, **line_kwargs)
def add_subnetwork_lines(hm,roi_nums,*line_args,**line_kwargs):
    """Overlay subnetwork boundary lines on a heatmap's axes.

    Draws horizontal and vertical lines at the cumulative ROI counts in
    `roi_nums`, delimiting subnetwork blocks.
    """
    bounds = [0] + [c - 0.25 for c in np.cumsum(roi_nums)]
    hm.hlines(bounds, *hm.get_xlim(), *line_args, **line_kwargs)
    hm.vlines(bounds, *hm.get_ylim(), *line_args, **line_kwargs)
def add_subnetwork_colours(hm,bb,roi_nums,roi_cols,*line_args,**line_kwargs):
    # Draw coloured bars along the left and top edges of a heatmap, one
    # segment per subnetwork: segment i spans the fraction of the bounding
    # box `bb` corresponding to the cumulative ROI counts in `roi_nums`,
    # drawn in colour roi_cols[i] on a transparent full-figure axis.
    # bb: [[x0, y0], [x1, y1]] bounding box of the heatmap in figure coords
    #     -- presumably from the heatmap axes' position; confirm with caller.
    ax2 = plt.axes([0,0,1,1], facecolor=(1,1,1,0)); ax2.axis("off"); #ax2.get_xaxis().set_visible(False), ax2.get_yaxis().set_visible(False)
    temp_nroi_cum = [0]+[i-0.25 for i in np.cumsum(roi_nums)]
    for i in range(len(roi_nums)):
        # Vertical segment just left of the heatmap (top-down), then the
        # matching horizontal segment just above it (left-right).
        ax2.add_line(lines.Line2D([bb[0,0]-0.02*(bb[1,0]-bb[0,0]) ,bb[0,0]-0.02*(bb[1,0]-bb[0,0])], [bb[1,1]-(bb[1,1]-bb[0,1])*(temp_nroi_cum[i]/sum(roi_nums)) ,bb[1,1]-(bb[1,1]-bb[0,1])*(temp_nroi_cum[i+1]/sum(roi_nums))], color=roi_cols[i], *line_args,**line_kwargs))
        ax2.add_line(lines.Line2D([bb[0,0]+(bb[1,0]-bb[0,0])*(temp_nroi_cum[i]/sum(roi_nums)) ,bb[0,0]+(bb[1,0]-bb[0,0])*(temp_nroi_cum[i+1]/sum(roi_nums))], [bb[1,1]+0.02*(bb[1,1]-bb[0,1]) ,bb[1,1]+0.02*(bb[1,1]-bb[0,1])], color=roi_cols[i], *line_args,**line_kwargs))
def adjust_lightness(color, amount=0.5):
    """Lighten (amount > 1) or darken (amount < 1) a matplotlib colour.

    Accepts a named matplotlib colour or any colour spec; the lightness
    channel is scaled by `amount` in HLS space and clamped to [0, 1].
    Modified from: https://stackoverflow.com/questions/37765197/darken-or-lighten-a-color-in-matplotlib
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        c = mc.cnames[color]
    # Bug fix: replaced a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) with the exceptions a failed name
    # lookup can actually raise.
    except (KeyError, TypeError):
        c = color
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
def perm_sphere_p(x,y,perm_id,corr_type='spearman'):
    """Permutation p-value for the spatial correlation of two parcellated maps.

    Builds a null distribution by correlating spherically permuted versions
    of each map against the unpermuted other map (in both directions), and
    compares the empirical correlation against it.

    Args:
        x: first map to be correlated (vector over regions).
        y: second map to be correlated (vector over regions).
        perm_id: array of size [n(total regions) x nrot] of permutations
            from the set of regions to itself (as generated by
            "rotate_parcellation").
        corr_type: type of correlation, "spearman" (default) or "pearson".

    Returns:
        Permutation p-value, averaged over the two permutation directions.

    Raises:
        ValueError: if `corr_type` is not recognised.
    """
    nperm = perm_id.shape[1]  # number of permutations
    if corr_type == 'spearman':
        corr_fun = sp.stats.spearmanr
    elif corr_type == 'pearson':
        corr_fun = sp.stats.pearsonr
    else:
        raise ValueError("corr_type must be 'spearman' or 'pearson'")
    x = np.asarray(x)
    y = np.asarray(y)
    perm_id = np.asarray(perm_id, dtype=int)
    rho_emp = corr_fun(x, y)[0]
    # Permute both measures.
    # Bug fix: the original `x_perm = y_perm = np.zeros(...)` bound both
    # names to the SAME array, so the x permutations were overwritten by the
    # y permutations before being correlated.
    x_perm = x[perm_id]  # [nroi x nperm]; column r is x under permutation r
    y_perm = y[perm_id]
    # Correlate each permuted map to the unpermuted other map.
    rho_null_xy = np.zeros(nperm)
    rho_null_yx = np.zeros(nperm)
    for r in range(nperm):
        rho_null_xy[r] = corr_fun(x_perm[:, r], y)[0]
        rho_null_yx[r] = corr_fun(y_perm[:, r], x)[0]
    # p-value definition depends on the sign of the empirical correlation.
    if (rho_emp > 0):
        p_perm_xy = np.sum(rho_null_xy > rho_emp) / nperm
        p_perm_yx = np.sum(rho_null_yx > rho_emp) / nperm
    else:
        p_perm_xy = np.sum(rho_null_xy < rho_emp) / nperm
        p_perm_yx = np.sum(rho_null_yx < rho_emp) / nperm
    # Return the average p-value over both directions.
    return ((p_perm_xy + p_perm_yx) / 2)
from wbplot import pscalar
import numpy as np
from matplotlib import cm, lines
import nibabel as nb
import nilearn as nl
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy as sp
10_fmt(p):
if p < 1e-10:
return 'P < $10^{-10}$'
elif p > 0.001:
return 'P = '+str(round(p,3))
else:
s = "%1.2e" % p
decimal_point = '.'
positive_sign = '+'
tup = s.split('e')
significand = tup[0].rstrip(decimal_point)
sign = tup[1][0].replace(positive_sign, '')
exponent = tup[1][1:].lstrip('0')
if exponent:
exponent = '10^{%s%s}' % (sign, exponent)
if significand and exponent:
s = r'%s{\times}%s' % (significand, exponent)
else:
s = r'%s%s' % (significand, exponent)
return "P = ${}$".format(s)
def pscalar_mmp_hk(file_out, pscalars_hk, mmp_hk, orientation='landscape',
                   hemi=None, vrange=None, cmap='magma', transp=False):
    """Plot high-res (360-ROI) parcellation values, greying out dropped regions.

    Parameters
    ----------
    file_out : str
        Output image path (forwarded to ``wbplot.pscalar``).
    pscalars_hk : array-like
        Values for the retained regions, ordered like ``mmp_hk``.
    mmp_hk : array-like of int
        Indices (into the 360-region parcellation) of the retained regions.
    orientation : str
        Figure orientation, forwarded to ``pscalar``.
    hemi : str or None
        Hemisphere to plot, forwarded as ``hemisphere``.
    vrange : tuple or None
        Colour range; defaults to (min, max) of ``pscalars_hk``.
    cmap : str
        Matplotlib colormap name.
    transp : bool
        Whether the rendered background is transparent.
    """
    if vrange is None:
        vrange = (min(pscalars_hk), max(pscalars_hk))
    # Dropped regions get a value below vrange so they fall into the colormap's
    # 'under' colour (the full MMP parcellation has 360 ROIs -> hardcoded).
    pscalars = np.ones(360) * (vrange[0] - 1)
    pscalars[mmp_hk] = pscalars_hk
    # Edit the colormap so out-of-range (dropped) regions render grey.
    # NOTE(review): set_under mutates the registered colormap object returned by
    # cm.get_cmap; on matplotlib >= 3.4 consider .copy() first.
    cmap_under = cm.get_cmap(cmap, 256)
    cmap_under.set_under('grey')
    # BUGFIX: forward the caller-supplied orientation instead of the hardcoded
    # 'landscape' literal (the parameter was previously ignored).
    pscalar(file_out, pscalars, orientation=orientation,
            hemisphere=hemi, vrange=vrange, cmap=cmap_under, transparent=transp)
# plot of low-res (44-ROI) parcellation, excluding "dropped" regions
def pscalar_mmp_lk(file_out, pscalars_lk, mmp_lk, mmp_ds_ids, orientation='landscape',
                   hemi=None, vrange=None, cmap='magma', transp=False):
    """Plot low-res (44-ROI) parcellation values, greying out dropped regions.

    Mirrors ``pscalar_mmp_hk`` for the 44-region downsampled parcellation.
    ``mmp_ds_ids`` presumably maps each of the 360 high-res regions to its
    low-res parent region (TODO: confirm against the caller).
    """
    # set vrange if it wasn't set before
    if vrange is None:
        vrange = (min(pscalars_lk), max(pscalars_lk))
    # Dropped regions get a value below vrange so they render in the 'under' colour.
    pscalars = np.ones(44) * (vrange[0] - 1)
    pscalars[mmp_lk] = pscalars_lk
    # BUGFIX: this line was corrupted to "m.get_cmap(cmap, 256)" (no assignment),
    # which left cmap_under undefined; restored from the pscalar_mmp_hk twin.
    cmap_under = cm.get_cmap(cmap, 256)
    cmap_under.set_under('grey')
    # (Removed a duplicated `if vrange is None` check -- vrange is already set above.)
    # Upsample to 360 regions via mmp_ds_ids; BUGFIX: forward the caller's
    # orientation instead of the hardcoded 'landscape' literal.
    pscalar(file_out, pscalars[mmp_ds_ids], orientation=orientation,
            hemisphere=hemi, vrange=vrange, cmap=cmap_under, transparent=transp)
# plot colorbar
def plot_cbar(c_lim, cmap_nm, c_label, lbs, save_path):
    """Render a standalone horizontal colorbar and save it as .png or .svg.

    c_lim     -- (vmin, vmax) limits for the colour normalisation
    cmap_nm   -- matplotlib colormap name
    c_label   -- label drawn under the bar
    lbs       -- label font size
    save_path -- output path; only '.png' (at 500 dpi) and '.svg' are written
    """
    fig, ax = plt.subplots(figsize=(6, 0.75))
    fig.subplots_adjust(bottom=0.65)
    colormap = cm.get_cmap(cmap_nm, 256)
    norm = mpl.colors.Normalize(vmin=c_lim[0], vmax=c_lim[1])
    bar = mpl.colorbar.ColorbarBase(ax, cmap=colormap, norm=norm, orientation='horizontal')
    bar.set_label(c_label, size=lbs)
    extension = save_path[-3:]
    if extension == 'png':
        plt.savefig(save_path, dpi=500)
    elif extension == 'svg':
        plt.savefig(save_path)
# Median Absolute Deviation
def mad(a, axis=None):
    """Median absolute deviation of ``a`` along ``axis``, ignoring NaNs."""
    # keepdims=True keeps the reduced axis so the median still broadcasts
    # against the original array when subtracting.
    center = np.nanmedian(a, axis=axis, keepdims=True)
    return np.nanmedian(np.absolute(a - center), axis=axis)
def kth_diag_indices(a, k):
    """Row/column indices of the k-th diagonal of square array ``a``.

    k == 0 is the main diagonal, k > 0 is above it, k < 0 below it.
    """
    rows, cols = np.diag_indices_from(a)
    if k == 0:
        return rows, cols
    if k > 0:
        return rows[:-k], cols[k:]
    return rows[-k:], cols[:k]
def plot_nl_image_masked(img_vec,mask_vec,img_shape,img_affine,cmap,clim=None,*line_args,**line_kwargs):
    """Plot a flattened volume with voxels outside the mask rendered white."""
    in_mask = img_vec[mask_vec == 1]
    if clim is None:
        # Cap the upper limit at the 95th percentile so outliers do not wash
        # out the colour map.
        clim = (min(in_mask), np.percentile(in_mask, 95))
    # i) voxels outside the mask get a value below clim so they hit the
    #    colormap's 'under' colour (white)
    img_masked = np.ones(img_vec.size) * (clim[0] - 1)
    img_masked[mask_vec == 1] = in_mask
    cmap_under = cm.get_cmap(cmap, 256)
    cmap_under.set_under('white')
    # ii) wrap as a NIfTI image and delegate the rendering to nilearn
    nii = nb.Nifti1Image(np.reshape(img_masked, img_shape), affine=img_affine)
    nl.plotting.plot_img(nii, colorbar=True, cmap=cmap_under, vmin=clim[0], vmax=clim[1], *line_args, **line_kwargs)
def add_subnetwork_lines(hm,roi_nums,*line_args,**line_kwargs):
    """Overlay separator lines between sub-network blocks on heatmap axes ``hm``.

    ``roi_nums`` gives the number of ROIs per sub-network; boundaries are the
    cumulative sums, offset by -0.25 to sit between heatmap cells.
    """
    boundaries = [0] + [edge - 0.25 for edge in np.cumsum(roi_nums)]
    hm.hlines(boundaries, *hm.get_xlim(), *line_args, **line_kwargs)
    hm.vlines(boundaries, *hm.get_ylim(), *line_args, **line_kwargs)
def add_subnetwork_colours(hm,bb,roi_nums,roi_cols,*line_args,**line_kwargs):
    """Draw one coloured bar per sub-network along the left and top edges of a heatmap.

    hm       -- heatmap axes (not used directly here; kept for call-site symmetry
                with add_subnetwork_lines)
    bb       -- 2x2 array [[x0, y0], [x1, y1]] bounding the heatmap in figure
                coordinates -- presumably from ax.get_position(); confirm with caller
    roi_nums -- number of ROIs in each sub-network (defines each bar's length)
    roi_cols -- one colour per sub-network
    """
    # add network colour lines
    # Full-figure transparent overlay axes so the bars can sit just outside the heatmap.
    ax2 = plt.axes([0,0,1,1], facecolor=(1,1,1,0)); ax2.axis("off"); #ax2.get_xaxis().set_visible(False), ax2.get_yaxis().set_visible(False)
    # Cumulative ROI boundaries, offset by -0.25 to match add_subnetwork_lines.
    temp_nroi_cum = [0]+[i-0.25 for i in np.cumsum(roi_nums)]
    for i in range(len(roi_nums)):
        # Vertical bar just left (2% of the width) of the heatmap, spanning sub-network i.
        ax2.add_line(lines.Line2D([bb[0,0]-0.02*(bb[1,0]-bb[0,0]) ,bb[0,0]-0.02*(bb[1,0]-bb[0,0])], [bb[1,1]-(bb[1,1]-bb[0,1])*(temp_nroi_cum[i]/sum(roi_nums)) ,bb[1,1]-(bb[1,1]-bb[0,1])*(temp_nroi_cum[i+1]/sum(roi_nums))], color=roi_cols[i], *line_args,**line_kwargs))
        # Horizontal bar just above (2% of the height) the heatmap, spanning sub-network i.
        ax2.add_line(lines.Line2D([bb[0,0]+(bb[1,0]-bb[0,0])*(temp_nroi_cum[i]/sum(roi_nums)) ,bb[0,0]+(bb[1,0]-bb[0,0])*(temp_nroi_cum[i+1]/sum(roi_nums))], [bb[1,1]+0.02*(bb[1,1]-bb[0,1]) ,bb[1,1]+0.02*(bb[1,1]-bb[0,1])], color=roi_cols[i], *line_args,**line_kwargs))
def adjust_lightness(color, amount=0.5):
    """Return ``color`` with its HLS lightness scaled by ``amount``.

    amount < 1 darkens, amount > 1 lightens; the result is clamped to [0, 1].
    Accepts any matplotlib colour spec (name, hex string, RGB tuple).
    From: https://stackoverflow.com/questions/37765197/darken-or-lighten-a-color-in-matplotlib
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        c = mc.cnames[color]
    # BUGFIX: narrowed a bare `except:` -- only a failed name lookup (KeyError)
    # or an unhashable spec such as a list (TypeError) should fall through.
    except (KeyError, TypeError):
        c = color
    h, l, s = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(h, max(0, min(1, amount * l)), s)
def perm_sphere_p(x,y,perm_id,corr_type='spearman'):
    """Permutation ("spin test") p-value for the spatial correlation of two maps.

    Generates a p-value for the spatial correlation between two parcellated
    cortical surface maps using a set of spherical permutations of regions of
    interest. The permutation is applied in both directions (x permuted vs y,
    and y permuted vs x) and the two one-sided p-values are averaged.

    Parameters
    ----------
    x, y : array-like, shape (nroi,)
        The two maps to be correlated.
    perm_id : ndarray, shape (nroi, nperm)
        Permutations of region indices (as generated by "rotate_parcellation").
    corr_type : {'spearman', 'pearson'}
        Type of correlation.

    Returns
    -------
    float
        Permutation p-value.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    perm_id = np.asarray(perm_id)
    nroi, nperm = perm_id.shape  # number of regions, number of permutations
    if corr_type == 'spearman':
        corr = lambda a, b: sp.stats.spearmanr(a, b)[0]
    elif corr_type == 'pearson':
        corr = lambda a, b: sp.stats.pearsonr(a, b)[0]
    else:
        raise ValueError("corr_type must be 'spearman' or 'pearson'")
    rho_emp = corr(x, y)
    # Permute both measures.
    # BUGFIX: the original `x_perm = y_perm = np.zeros((nroi, nperm))` bound
    # BOTH names to the SAME array, so the y-permutation writes silently
    # overwrote the x permutations. Fancy indexing also replaces the explicit
    # double loop: x[perm_id] has shape (nroi, nperm).
    x_perm = x[perm_id]
    y_perm = y[perm_id]
    # Correlate each permuted measure against the unpermuted other measure.
    rho_null_xy = np.array([corr(x_perm[:, r], y) for r in range(nperm)])
    rho_null_yx = np.array([corr(y_perm[:, r], x) for r in range(nperm)])
    # The p-value definition depends on the sign of the empirical correlation.
    if rho_emp > 0:
        p_perm_xy = np.sum(rho_null_xy > rho_emp) / nperm
        p_perm_yx = np.sum(rho_null_yx > rho_emp) / nperm
    else:
        p_perm_xy = np.sum(rho_null_xy < rho_emp) / nperm
        p_perm_yx = np.sum(rho_null_yx < rho_emp) / nperm
    # Return the average of the two directional p-values.
    return (p_perm_xy + p_perm_yx) / 2
1c39873b51a9cef0ed9bb39b182792eaa9448ecd | 4,767 | py | Python | src/dev/hashing-algorithms.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 10 | 2019-01-18T19:11:54.000Z | 2022-03-16T08:39:36.000Z | src/dev/hashing-algorithms.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 2 | 2019-02-19T15:10:44.000Z | 2019-02-26T04:26:24.000Z | src/dev/hashing-algorithms.py | momacs/pram | d2de43ea447d13a65d814f781ec86889754f76fe | [
"BSD-3-Clause"
] | 3 | 2019-02-19T15:11:08.000Z | 2021-08-20T11:51:04.000Z | '''
Resources
Algorithms
https://github.com/ifduyue/python-xxhash
https://pypi.org/project/cityhash/
https://pypi.org/project/mmh3/
https://docs.python.org/3/library/zlib.html
Other
http://slidedeck.io/LawnGnome/non-cryptographic-hashing
'''
import cityhash,dill,inspect,json,mmh3,pickle,time,xxhash,zlib
import os,sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pram.entity import Site
# ----------------------------------------------------------------------------------------------------------------------
# Parameters:
N = 100_000
attr = { 'flu': 's', 'age_group': '10_19', 'is_migrating': True, 't-migration': 3, 'history': [2,4,8.0] }
rel = { 'home': Site('home-x').__hash__(), 'school': Site('school-01').__hash__() }
cond = [lambda x: x > 1, lambda x: x < 1, lambda x: x == 1]
full = False
# ----------------------------------------------------------------------------------------------------------------------
# Algorithms:
# (1) hash + str + inspect.getsource (64):
t0 = time.time()
for i in range(N):
hash(str((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'01: {time.time() - t0}')
# (2) hash + json + inspect.getsource (64):
t0 = time.time()
for i in range(N):
hash(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True))
print(f'02: {time.time() - t0}')
# (3) xxh64 + str + inspect.getsource (64):
t0 = time.time()
for i in range(N):
xxhash.xxh64(str((attr, rel, str([inspect.getsource(i) for i in cond]), full))).intdigest()
print(f'03: {time.time() - t0}')
# (4) xxh64 + json + inspect.getsource (64):
t0 = time.time()
for i in range(N):
xxhash.xxh64(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True)).intdigest()
print(f'04: {time.time() - t0}')
# (5) cityhash + str + inspect.getsource (64):
t0 = time.time()
for i in range(N):
cityhash.CityHash64(str((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'05: {time.time() - t0}')
# (6) cityhash + json + inspect.getsource (64):
t0 = time.time()
for i in range(N):
cityhash.CityHash64(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True))
print(f'06: {time.time() - t0}')
# (7) murmur3 + str + inspect.getsource (32):
t0 = time.time()
for i in range(N):
mmh3.hash(str((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'07: {time.time() - t0}')
# (8) murmur3 + json + inspect.getsource (32):
t0 = time.time()
for i in range(N):
mmh3.hash(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True))
print(f'08: {time.time() - t0}')
# (9) adler + pickle + inspect.getsource (32):
t0 = time.time()
for i in range(N):
zlib.adler32(pickle.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'09: {time.time() - t0}')
# (10) adler + dill + inspect.getsource (32):
t0 = time.time()
for i in range(N):
zlib.adler32(dill.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'10: {time.time() - t0}')
# (11) adler + str.encode + inspect.getsource (32):
t0 = time.time()
for i in range(N):
zlib.adler32(str.encode(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True)))
print(f'11: {time.time() - t0}')
# (12) crc + pickle + inspect.getsource (32):
# BUGFIX: benchmarks 12-14 are labelled "crc" but called zlib.adler32, exactly
# duplicating benchmarks 09-11 (the near-identical 09/12 and 10/13 timings in
# the results table confirm it); they now call zlib.crc32 as labelled.
t0 = time.time()
for i in range(N):
    zlib.crc32(pickle.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'12: {time.time() - t0}')

# (13) crc + dill + inspect.getsource (32):
t0 = time.time()
for i in range(N):
    zlib.crc32(dill.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'13: {time.time() - t0}')

# (14) crc + str.encode + inspect.getsource (32):
t0 = time.time()
for i in range(N):
    zlib.crc32(str.encode(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True)))
print(f'14: {time.time() - t0}')
# ----------------------------------------------------------------------------------------------------------------------
# Results:
# N = 100_000
#
# 09: 42.79729390144348
# 07: 42.879645347595215
# 01: 42.90272378921509
# 12: 42.948513984680176
# 05: 43.01556396484375
# 03: 43.406972885131836
# 04: 43.84713292121887 <-- is 64b and sorts keys (xxhash)
# 06: 43.96315002441406 <-- is 64b and sorts keys (citihash)
# 14: 44.02376699447632
# 11: 44.02756714820862
# 02: 45.38511109352112
# 08: 46.35863995552063
# 13: 54.52591300010681
# 10: 54.58932113647461
# ----------------------------------------------------------------------------------------------------------------------
# (zlib.adler32(strg, perturber) << N) ^ hash(strg)
| 33.808511 | 120 | 0.57898 |
import cityhash,dill,inspect,json,mmh3,pickle,time,xxhash,zlib
import os,sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pram.entity import Site
N = 100_000
attr = { 'flu': 's', 'age_group': '10_19', 'is_migrating': True, 't-migration': 3, 'history': [2,4,8.0] }
rel = { 'home': Site('home-x').__hash__(), 'school': Site('school-01').__hash__() }
cond = [lambda x: x > 1, lambda x: x < 1, lambda x: x == 1]
full = False
t0 = time.time()
for i in range(N):
hash(str((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'01: {time.time() - t0}')
t0 = time.time()
for i in range(N):
hash(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True))
print(f'02: {time.time() - t0}')
t0 = time.time()
for i in range(N):
xxhash.xxh64(str((attr, rel, str([inspect.getsource(i) for i in cond]), full))).intdigest()
print(f'03: {time.time() - t0}')
t0 = time.time()
for i in range(N):
xxhash.xxh64(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True)).intdigest()
print(f'04: {time.time() - t0}')
t0 = time.time()
for i in range(N):
cityhash.CityHash64(str((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'05: {time.time() - t0}')
t0 = time.time()
for i in range(N):
cityhash.CityHash64(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True))
print(f'06: {time.time() - t0}')
t0 = time.time()
for i in range(N):
mmh3.hash(str((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'07: {time.time() - t0}')
t0 = time.time()
for i in range(N):
mmh3.hash(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True))
print(f'08: {time.time() - t0}')
t0 = time.time()
for i in range(N):
zlib.adler32(pickle.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'09: {time.time() - t0}')
t0 = time.time()
for i in range(N):
zlib.adler32(dill.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'10: {time.time() - t0}')
t0 = time.time()
for i in range(N):
zlib.adler32(str.encode(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True)))
print(f'11: {time.time() - t0}')
t0 = time.time()
for i in range(N):
zlib.adler32(pickle.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'12: {time.time() - t0}')
t0 = time.time()
for i in range(N):
zlib.adler32(dill.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full)))
print(f'13: {time.time() - t0}')
t0 = time.time()
for i in range(N):
zlib.adler32(str.encode(json.dumps((attr, rel, str([inspect.getsource(i) for i in cond]), full), sort_keys=True)))
print(f'14: {time.time() - t0}')
| true | true |
1c3987a79dd744f9b321256a697b4990832a9bcf | 6,977 | py | Python | model.py | jessicapetrochuk/Detectron_2_Image_Segmentation | 67ab6fb03b90a298367c86eab0d89a2d8438169a | [
"MIT"
] | null | null | null | model.py | jessicapetrochuk/Detectron_2_Image_Segmentation | 67ab6fb03b90a298367c86eab0d89a2d8438169a | [
"MIT"
] | null | null | null | model.py | jessicapetrochuk/Detectron_2_Image_Segmentation | 67ab6fb03b90a298367c86eab0d89a2d8438169a | [
"MIT"
] | null | null | null | import torch
import natsort
import numpy as np
import pycocotools
from PIL import Image
import os, cv2, random
import torchvision.ops as ops
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.structures import BoxMode
from detectron2.engine import DefaultTrainer
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import ColorMode
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
def get_masks(num_imgs, masks_root='/Users/jessicapetrochuk/Documents/School/UBC/2021-2022/Directed Studies/Code/Detectron_2/myDATASET/masks_with_rotations'):
    """
    Loops through all masks in the mask directory and returns the masks and bounding boxes.
    Arguments:
        num_imgs (int): number of images that are being used to train the model
        masks_root (str): directory containing the per-image `section_masks_{i}`
            subdirectories (parameterized; default preserves the original path)
    Returns:
        bboxes (list of [x1, y1, x2, y2]): one bounding box per mask
        masks (list): COCO RLE-encoded binary masks (detectron2 'bitmask' format)
    """
    bboxes = []
    masks = []
    for i in range(num_imgs):
        masks_subdir = os.path.join(masks_root, 'section_masks_{}'.format(i))
        # Loop through every mask image for section i (skip hidden files such as .DS_Store).
        for mask in sorted(os.listdir(masks_subdir)):
            if mask.startswith('.'):
                continue
            full_path = os.path.join(masks_subdir, mask)
            # Context manager closes the file handle PIL keeps open (leak fix).
            with Image.open(full_path) as img:
                mask_img = img.convert("1")
            mask_array = np.asarray(mask_img)
            # Binarise, then derive the tight bounding box from the mask.
            mask_array_bin = np.where(mask_array > 0.5, 1, 0).astype(np.uint8)
            mask_tensor = torch.tensor(mask_array_bin).unsqueeze(0)
            bbox = ops.masks_to_boxes(mask_tensor)
            # RLE-encode the boolean mask for detectron2; Fortran order is
            # required by pycocotools.
            masks.append(pycocotools.mask.encode(np.asarray(mask_array, order="F")))
            bboxes.append(bbox.tolist()[0])
        print(i, ':', masks_subdir)
    print('Done getting masks and bounding boxes')
    return bboxes, masks
def get_masks_dict(bboxes, masks, images_path="/Users/jessicapetrochuk/Documents/School/UBC/2021-2022/Directed Studies/Code/Detectron_2/myDATASET/images_with_rotations"):
    """Build the list of detectron2 dataset dicts pairing each image with its annotation.

    Arguments:
        bboxes: per-image bounding boxes, aligned by index with the sorted image list
        masks: per-image RLE-encoded masks, aligned the same way
        images_path (str): image directory (parameterized; default preserves the
            original hardcoded path)
    Returns:
        list of dicts in detectron2's standard dataset format.
    """
    print('starting getting dataset dictionary')
    dataset_dicts = []
    # Natural sort so 'img_10' follows 'img_9' and indices line up with bboxes/masks.
    image_files_sorted = natsort.natsorted(os.listdir(images_path), reverse=False)
    img_id = 0
    # NOTE: removed the original `if img_id < 227:` guard -- img_id is always 0
    # at that point, so the guard was dead code.
    for image in image_files_sorted:
        if image.startswith('.'):
            continue  # skip hidden files such as .DS_Store
        filename = os.path.join(images_path, image)
        height, width = cv2.imread(filename).shape[:2]
        record = {
            'file_name': filename,
            'image_id': img_id,
            'height': height,
            'width': width,
        }
        # TODO (from original): handle images with multiple regions; currently
        # exactly one hippocampus annotation per image.
        annotation_hippocampus = {
            'bbox': bboxes[img_id],
            'bbox_mode': BoxMode.XYXY_ABS,
            'category_id': 0,
            'segmentation': masks[img_id],
        }
        record['annotations'] = [annotation_hippocampus]
        dataset_dicts.append(record)
        img_id += 1
    return dataset_dicts
def visualize(dataset_dicts):
    """Show three random ground-truth samples as a quick sanity check."""
    for record in random.sample(dataset_dicts, 3):
        image = cv2.imread(record["file_name"])
        # Visualizer expects RGB; cv2 loads BGR, hence the channel reversal.
        vis = Visualizer(image[:, :, ::-1], metadata=None, scale=0.5)
        drawn = vis.draw_dataset_dict(record)
        cv2.imshow('', drawn.get_image()[:, :, ::-1])
def train():
    """Register the brain dataset, configure Mask R-CNN, and run inference.

    NOTE(review): the DefaultTrainer block is commented out below, so this
    function currently only runs inference -- it expects a previously trained
    `model_final.pth` to already exist in cfg.OUTPUT_DIR.
    """
    TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
    CUDA_VERSION = torch.__version__.split("+")[-1]
    print("torch: ", TORCH_VERSION, "; cuda: ", CUDA_VERSION)
    # Load all masks/bboxes and build the detectron2 dataset dicts.
    num_imgs = 227
    bboxes, masks = get_masks(num_imgs)
    dataset_dicts = get_masks_dict(bboxes, masks)
    # for d in ["train", "val"]:
    for d in ["train"]:
        # lambda default-binds d so each registration keeps its own split name.
        DatasetCatalog.register("brain_" + d, lambda d=d: get_masks_dict(bboxes, masks))
        MetadataCatalog.get("brain_" + d).set(thing_classes=["hippocampus"])
    brain_metadata = MetadataCatalog.get("brain_train")
    # DatasetCatalog.register("my_dataset", my_dataset_function)
    # Base config: COCO Mask R-CNN R50-FPN 3x schedule, CPU-only, bitmask masks.
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.INPUT.MASK_FORMAT='bitmask'
    cfg.MODEL.DEVICE = 'cpu'
    cfg.DATASETS.TRAIN = ("brain_train",)
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml") # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025 # pick a good LR
    cfg.SOLVER.MAX_ITER = 300 # 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset
    cfg.SOLVER.STEPS = [] # do not decay learning rate
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # only has one class (ballon). (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
    # NOTE: this config means the number of classes, but a few popular unofficial tutorials incorrect uses num_classes+1 here.
    # os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    # trainer = DefaultTrainer(cfg)
    # trainer.resume_or_load(resume=False)
    # trainer.train()
    #Inference and evaluation
    # Inference should use the config with parameters that are used in training
    # cfg now already contains everything we've set previously. We changed it a little bit for inference:
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") # path to the model we just trained
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold
    predictor = DefaultPredictor(cfg)
    # Visualise predictions on three random samples (BGR -> RGB for Visualizer).
    for d in random.sample(dataset_dicts, 3):
        im = cv2.imread(d["file_name"])
        outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
        v = Visualizer(im[:, :, ::-1],
                metadata=brain_metadata,
                scale=0.5,
                instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models
        )
        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        print(out.get_image())
        cv2.imshow('', out.get_image()[:, :, ::-1])
        cv2.imwrite('hello.png', out.get_image()[:, :, ::-1])
#setup_logger()
if __name__ == '__main__':
train() | 44.724359 | 210 | 0.662606 | import torch
import natsort
import numpy as np
import pycocotools
from PIL import Image
import os, cv2, random
import torchvision.ops as ops
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.structures import BoxMode
from detectron2.engine import DefaultTrainer
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import ColorMode
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
def get_masks(num_imgs):
bboxes = []
masks = []
for i in range(num_imgs):
masks_subdir = '/Users/jessicapetrochuk/Documents/School/UBC/2021-2022/Directed Studies/Code/Detectron_2/myDATASET/masks_with_rotations/section_masks_{}'.format(i)
for mask in sorted(os.listdir(masks_subdir)):
if not mask.startswith('.'):
full_path = os.path.join(masks_subdir, mask)
mask_img = Image.open(full_path).convert("1")
mask_array = np.asarray(mask_img)
mask_array_bin = np.where(mask_array > 0.5, 1, 0).astype(np.uint8)
mask_tensor = torch.tensor(mask_array_bin).unsqueeze(0)
bbox = ops.masks_to_boxes(mask_tensor)
bbox_list = bbox.tolist()
mask_array = pycocotools.mask.encode(np.asarray(mask_array, order="F"))
masks.append(mask_array)
bboxes.append(bbox_list[0])
print(i, ':', masks_subdir)
print('Done getting masks and bounding boxes')
return bboxes, masks
def get_masks_dict(bboxes, masks):
print('starting getting dataset dictionary')
dataset_dicts = []
images_path = "/Users/jessicapetrochuk/Documents/School/UBC/2021-2022/Directed Studies/Code/Detectron_2/myDATASET/images_with_rotations"
image_files = os.listdir(images_path)
image_files_sorted = natsort.natsorted(image_files,reverse=False)
img_id = 0
if img_id < 227:
for image in image_files_sorted:
record = {}
if not image.startswith('.'):
filename = os.path.join(images_path, image)
height, width = cv2.imread(filename).shape[:2]
record['file_name'] = filename
record['image_id'] = img_id
record['height'] = height
record['width'] = width
annotations = []
annotation_hippocampus = {}
annotation_hippocampus['bbox'] = bboxes[img_id]
annotation_hippocampus['bbox_mode'] = BoxMode.XYXY_ABS
annotation_hippocampus['category_id'] = 0
annotation_hippocampus['segmentation'] = masks[img_id]
annotations.append(annotation_hippocampus)
record['annotations'] = annotations
dataset_dicts.append(record)
img_id += 1
return dataset_dicts
def visualize(dataset_dicts):
for d in random.sample(dataset_dicts, 3):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1], metadata=None, scale=0.5)
out = visualizer.draw_dataset_dict(d)
cv2.imshow('', out.get_image()[:, :, ::-1])
def train():
TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
CUDA_VERSION = torch.__version__.split("+")[-1]
print("torch: ", TORCH_VERSION, "; cuda: ", CUDA_VERSION)
num_imgs = 227
bboxes, masks = get_masks(num_imgs)
dataset_dicts = get_masks_dict(bboxes, masks)
for d in ["train"]:
DatasetCatalog.register("brain_" + d, lambda d=d: get_masks_dict(bboxes, masks))
MetadataCatalog.get("brain_" + d).set(thing_classes=["hippocampus"])
brain_metadata = MetadataCatalog.get("brain_train")
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.INPUT.MASK_FORMAT='bitmask'
cfg.MODEL.DEVICE = 'cpu'
cfg.DATASETS.TRAIN = ("brain_train",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025
cfg.SOLVER.MAX_ITER = 300
cfg.SOLVER.STEPS = []
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") # path to the model we just trained
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold
predictor = DefaultPredictor(cfg)
for d in random.sample(dataset_dicts, 3):
im = cv2.imread(d["file_name"])
outputs = predictor(im) # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
v = Visualizer(im[:, :, ::-1],
metadata=brain_metadata,
scale=0.5,
instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models
)
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
print(out.get_image())
cv2.imshow('', out.get_image()[:, :, ::-1])
cv2.imwrite('hello.png', out.get_image()[:, :, ::-1])
#setup_logger()
if __name__ == '__main__':
train() | true | true |
1c39885591456c32596c53902fb5486d9dfc348f | 3,719 | py | Python | lib/modules/powershell/situational_awareness/network/reverse_dns.py | jimshew/Empire | a7413869849b2c3c521eb50bed61e00ede107688 | [
"BSD-3-Clause"
] | 4 | 2019-03-11T02:40:13.000Z | 2021-01-29T06:45:21.000Z | lib/modules/powershell/situational_awareness/network/reverse_dns.py | jimshew/Empire | a7413869849b2c3c521eb50bed61e00ede107688 | [
"BSD-3-Clause"
] | 20 | 2020-03-11T03:58:21.000Z | 2020-03-12T03:59:38.000Z | lib/modules/powershell/situational_awareness/network/reverse_dns.py | jimshew/Empire | a7413869849b2c3c521eb50bed61e00ede107688 | [
"BSD-3-Clause"
] | 1 | 2020-01-15T06:46:16.000Z | 2020-01-15T06:46:16.000Z | from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
class Module(object):
    """Empire module wrapper for DarkOperator's Invoke-ReverseDNSLookup."""

    def __init__(self, mainMenu, params=[]):
        # NOTE: the mutable default `params=[]` is kept for interface
        # compatibility with the Empire module loader; the list is only
        # iterated, never mutated, so the shared-default pitfall does not bite.
        self.info = {
            'Name': 'Invoke-ReverseDNSLookup',

            'Author': ['DarkOperator'],

            'Description': ('Performs a DNS Reverse Lookup of a given IPv4 IP Range.'),

            'Background' : True,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'Language' : 'powershell',

            'MinLanguageVersion' : '2',

            'Comments': [
                'https://github.com/darkoperator/Posh-SecMod/blob/master/Discovery/Discovery.psm1'
            ]
        }

        # Options settable during runtime; format: value_name : {Description, Required, Value}.
        self.options = {
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Range' : {
                'Description'   :   "Range to perform reverse DNS on.",
                'Required'      :   False,
                'Value'         :   ''
            },
            'CIDR' : {
                'Description'   :   "CIDR to perform reverse DNS on.",
                'Required'      :   False,
                'Value'         :   ''
            }
        }

        # Save off a copy of the mainMenu object to access external
        # functionality like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build the PowerShell script for this module, or '' if the source is unreadable."""
        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-ReverseDNSLookup.ps1"
        if obfuscate:
            helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
            moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
        # BUGFIX: narrowed a bare `except:` to OSError and switched to a context
        # manager so the file handle is closed even if read() raises.
        try:
            with open(moduleSource, 'r') as f:
                moduleCode = f.read()
        except OSError:
            print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
            return ""

        script = moduleCode
        scriptEnd = "Invoke-ReverseDNSLookup"
        # Append every non-Agent option the operator set as a cmdlet argument.
        for option, values in self.options.items():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        scriptEnd += " -" + str(option)
                    else:
                        scriptEnd += " -" + str(option) + " " + str(values['Value'])
        # only return objects where HostName is not an IP (i.e. the address resolves)
        scriptEnd += " | % {try{$entry=$_; $ipObj = [System.Net.IPAddress]::parse($entry.HostName); if(-not [System.Net.IPAddress]::tryparse([string]$_.HostName, [ref]$ipObj)) { $entry }} catch{$entry} } | Select-Object HostName, AddressList | ft -autosize | Out-String | %{$_ + \"`n\"}"
        if obfuscate:
            scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
        script += scriptEnd
        return script
| 36.821782 | 287 | 0.534015 | from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-ReverseDNSLookup',
'Author': ['DarkOperator'],
'Description': ('Performs a DNS Reverse Lookup of a given IPv4 IP Range.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/darkoperator/Posh-SecMod/blob/master/Discovery/Discovery.psm1'
]
}
self.options = {
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Range' : {
'Description' : "Range to perform reverse DNS on.",
'Required' : False,
'Value' : ''
},
'CIDR' : {
'Description' : "CIDR to perform reverse DNS on.",
'Required' : False,
'Value' : ''
}
}
self.mainMenu = mainMenu
for param in params:
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-ReverseDNSLookup.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
try:
f = open(moduleSource, 'r')
except:
print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptEnd = "Invoke-ReverseDNSLookup"
for option,values in self.options.items():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
scriptEnd += " -" + str(option)
else:
scriptEnd += " -" + str(option) + " " + str(values['Value'])
# only return objects where HostName is not an IP (i.e. the address resolves)
scriptEnd += " | % {try{$entry=$_; $ipObj = [System.Net.IPAddress]::parse($entry.HostName); if(-not [System.Net.IPAddress]::tryparse([string]$_.HostName, [ref]$ipObj)) { $entry }} catch{$entry} } | Select-Object HostName, AddressList | ft -autosize | Out-String | %{$_ + \"`n\"}"
if obfuscate:
scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
script += scriptEnd
return script
| true | true |
1c398a7a4236e27807795af5ede3b107f993b6d7 | 2,593 | py | Python | uncertainty_baselines/models/vit_batchensemble_gp_test.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | [
"Apache-2.0"
] | null | null | null | uncertainty_baselines/models/vit_batchensemble_gp_test.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | [
"Apache-2.0"
] | null | null | null | uncertainty_baselines/models/vit_batchensemble_gp_test.py | dvdzhang/uncertainty-baselines | 8ce0d7494e5cae0719c1b750da4b61564e536636 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the ViT-BatchEnsemble-GP model."""
from absl.testing import absltest
from absl.testing import parameterized
import jax
import uncertainty_baselines as ub
class PatchTransformerBEGPTest(parameterized.TestCase):
  """Shape checks for the ViT-BatchEnsemble-GP model (`PatchTransformerBEGP`)."""
  # Each tuple: (patch_size, patch_grid, num_classes, representation_size,
  #             expected_pre_logits_params_shape, classifier head type).
  @parameterized.parameters(
      ((4, 4), None, 10, None, (32, 32), "token"),
      (None, (4, 4), 100, 64, (32, 64), "token"),
      ((4, 4), None, 10, None, (32, 32), "gap"),
      (None, (4, 4), 100, 64, (32, 64), "gap"),
      ((4, 4), None, 10, None, (32, 32), "map"),
      (None, (4, 4), 100, 64, (32, 64), "map"),
  )
  def test_params_and_outputs_shapes(self, patch_size, patch_grid, num_classes,
                                     representation_size,
                                     expected_pre_logits_params_shape,
                                     classifier):
    """Builds a small model and checks logits/covmat/pre_logits shapes."""
    ens_size = 2
    batch_size = 3
    # Gaussian process kwargs.
    hidden_features = 1024
    gp_layer_kwargs = dict(hidden_features=hidden_features)
    model = ub.models.PatchTransformerBEGP(
        train=False,
        patch_size=patch_size,
        patch_grid=patch_grid,
        num_classes=num_classes,
        representation_size=representation_size,
        hidden_size=32,
        transformer=dict(
            num_heads=4,
            num_layers=2,
            mlp_dim=128,
            ens_size=ens_size,
            random_sign_init=0.5),
        classifier=classifier,
        gp_layer_kwargs=gp_layer_kwargs)
    inputs = jax.random.normal(jax.random.PRNGKey(0), (batch_size, 16, 16, 3))
    variables = model.init(jax.random.PRNGKey(0), inputs)
    logits, extra = model.apply(variables, inputs)
    # Ensemble members are stacked along the leading (batch) axis.
    self.assertEqual((ens_size * batch_size, num_classes), logits.shape)
    self.assertEqual(extra["covmat"].shape, (ens_size * batch_size,))
    if representation_size:
      self.assertEqual(expected_pre_logits_params_shape,
                       variables["params"]["pre_logits"]["kernel"].shape)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  absltest.main()
| 36.521127 | 79 | 0.650212 |
from absl.testing import absltest
from absl.testing import parameterized
import jax
import uncertainty_baselines as ub
class PatchTransformerBEGPTest(parameterized.TestCase):
@parameterized.parameters(
((4, 4), None, 10, None, (32, 32), "token"),
(None, (4, 4), 100, 64, (32, 64), "token"),
((4, 4), None, 10, None, (32, 32), "gap"),
(None, (4, 4), 100, 64, (32, 64), "gap"),
((4, 4), None, 10, None, (32, 32), "map"),
(None, (4, 4), 100, 64, (32, 64), "map"),
)
def test_params_and_outputs_shapes(self, patch_size, patch_grid, num_classes,
representation_size,
expected_pre_logits_params_shape,
classifier):
ens_size = 2
batch_size = 3
hidden_features = 1024
gp_layer_kwargs = dict(hidden_features=hidden_features)
model = ub.models.PatchTransformerBEGP(
train=False,
patch_size=patch_size,
patch_grid=patch_grid,
num_classes=num_classes,
representation_size=representation_size,
hidden_size=32,
transformer=dict(
num_heads=4,
num_layers=2,
mlp_dim=128,
ens_size=ens_size,
random_sign_init=0.5),
classifier=classifier,
gp_layer_kwargs=gp_layer_kwargs)
inputs = jax.random.normal(jax.random.PRNGKey(0), (batch_size, 16, 16, 3))
variables = model.init(jax.random.PRNGKey(0), inputs)
logits, extra = model.apply(variables, inputs)
self.assertEqual((ens_size * batch_size, num_classes), logits.shape)
self.assertEqual(extra["covmat"].shape, (ens_size * batch_size,))
if representation_size:
self.assertEqual(expected_pre_logits_params_shape,
variables["params"]["pre_logits"]["kernel"].shape)
if __name__ == "__main__":
absltest.main()
| true | true |
1c398ab1ab8ac3cbd9c97a4f6f62bf4d989e441d | 1,181 | py | Python | eve_db/migrations/0002_system_check_fixes.py | EVE-Tools/django-eve-db | 1b588ed789a785d9243c2f2cd2ba0c965d36ba77 | [
"BSD-3-Clause"
] | 3 | 2015-03-05T20:49:27.000Z | 2017-05-27T17:53:00.000Z | eve_db/migrations/0002_system_check_fixes.py | EVE-Tools/django-eve-db | 1b588ed789a785d9243c2f2cd2ba0c965d36ba77 | [
"BSD-3-Clause"
] | 1 | 2018-01-20T10:06:05.000Z | 2018-01-20T10:08:58.000Z | eve_db/migrations/0002_system_check_fixes.py | EVE-Tools/django-eve-db | 1b588ed789a785d9243c2f2cd2ba0c965d36ba77 | [
"BSD-3-Clause"
] | 1 | 2015-02-27T17:18:42.000Z | 2015-02-27T17:18:42.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Convert several EVE static-data relations to explicit one-to-one
    primary-key fields (presumably to satisfy Django's system checks — the
    migration is named 0002_system_check_fixes; verify against project history).
    """
    dependencies = [
        ('eve_db', '0001_initial'),
    ]
    operations = [
        # Each AlterField below makes the referenced FK the row's primary key
        # and a OneToOneField, so one related row maps to exactly one target.
        migrations.AlterField(
            model_name='invmetatype',
            name='type',
            field=models.OneToOneField(related_name='inventorymetatype_type_set', primary_key=True, serialize=False, to='eve_db.InvType'),
        ),
        migrations.AlterField(
            model_name='mapcelestialstatistic',
            name='celestial',
            field=models.OneToOneField(primary_key=True, serialize=False, to='eve_db.MapDenormalize'),
        ),
        migrations.AlterField(
            model_name='mapjump',
            name='origin_gate',
            field=models.OneToOneField(related_name='stargate_jump_origin_set', primary_key=True, serialize=False, to='eve_db.MapDenormalize'),
        ),
        migrations.AlterField(
            model_name='maplocationwormholeclass',
            name='location',
            field=models.OneToOneField(primary_key=True, serialize=False, to='eve_db.MapDenormalize'),
        ),
    ]
| 33.742857 | 143 | 0.635902 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('eve_db', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='invmetatype',
name='type',
field=models.OneToOneField(related_name='inventorymetatype_type_set', primary_key=True, serialize=False, to='eve_db.InvType'),
),
migrations.AlterField(
model_name='mapcelestialstatistic',
name='celestial',
field=models.OneToOneField(primary_key=True, serialize=False, to='eve_db.MapDenormalize'),
),
migrations.AlterField(
model_name='mapjump',
name='origin_gate',
field=models.OneToOneField(related_name='stargate_jump_origin_set', primary_key=True, serialize=False, to='eve_db.MapDenormalize'),
),
migrations.AlterField(
model_name='maplocationwormholeclass',
name='location',
field=models.OneToOneField(primary_key=True, serialize=False, to='eve_db.MapDenormalize'),
),
]
| true | true |
1c398b26894a66dfc4bc630632f55b9f9bc52638 | 186 | py | Python | src/generator/hide.py | lambdacasserole/zero-width-js | 27aee94612afcebabc4db08b0b4d0a89661493ad | [
"Unlicense"
] | 4 | 2019-09-02T03:45:15.000Z | 2022-03-19T01:20:38.000Z | src/generator/hide.py | lambdacasserole/zero-width-js | 27aee94612afcebabc4db08b0b4d0a89661493ad | [
"Unlicense"
] | null | null | null | src/generator/hide.py | lambdacasserole/zero-width-js | 27aee94612afcebabc4db08b0b4d0a89661493ad | [
"Unlicense"
] | null | null | null | import sys
# Convert a binary string read from stdin into a zero-width character string.
# Characters outside the alphabet are skipped: the original dict lookup raised
# KeyError on the trailing newline that sys.stdin.read() includes.
LOOKUP = {'0': '\u200b', '1': '\u200c', ' ': '\u200d'}


def encode_zero_width(binary):
    """Return *binary* with '0', '1' and ' ' mapped to zero-width characters.

    Any other character (e.g. a newline) is dropped rather than raising.
    """
    return ''.join(LOOKUP[char] for char in binary if char in LOOKUP)


if __name__ == "__main__":
    # Mirror the original script's output: the encoded text with no newline.
    print(encode_zero_width(sys.stdin.read()), end='')
| 23.25 | 54 | 0.607527 | import sys
binary = sys.stdin.read()
lookup = {'0': '\u200b', '1': '\u200c', ' ': '\u200d'}
for char in binary:
print(lookup[char], end = '')
| true | true |
1c398c64bee725e08491bc10a6721b75e46eeae2 | 1,207 | py | Python | k8s/k8splugin/exceptions.py | onap/dcaegen2-platform-plugins | 64131311ba1d01ff7d20bca0c14d30a006b2e712 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2020-07-14T14:22:04.000Z | 2020-07-14T14:22:04.000Z | k8s/k8splugin/exceptions.py | alex-sh2020/dcaegen2-platform-plugins | c5abb9b34468400bdcdd3ce23595af41ac03cd80 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | k8s/k8splugin/exceptions.py | alex-sh2020/dcaegen2-platform-plugins | c5abb9b34468400bdcdd3ce23595af41ac03cd80 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2020-07-14T19:02:05.000Z | 2020-07-14T19:02:05.000Z | # ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
class DockerPluginDeploymentError(RuntimeError):
    """Error raised when a plugin deployment operation fails."""
    pass
class DockerPluginDependencyNotReadyError(RuntimeError):
    """Error raised when something this plugin depends upon (e.g. the Docker
    API or Consul) is not ready."""
    pass
| 41.62069 | 82 | 0.570008 |
class DockerPluginDeploymentError(RuntimeError):
pass
class DockerPluginDependencyNotReadyError(RuntimeError):
pass
| true | true |
1c398c98d6cc4b33e1513064236da5cbc6ee025a | 3,643 | py | Python | pymongo/ismaster.py | llvtt/mongo-python-driver | 725f8342188e823ba90ab7d26e60e7a6bc43516a | [
"Apache-2.0"
] | null | null | null | pymongo/ismaster.py | llvtt/mongo-python-driver | 725f8342188e823ba90ab7d26e60e7a6bc43516a | [
"Apache-2.0"
] | null | null | null | pymongo/ismaster.py | llvtt/mongo-python-driver | 725f8342188e823ba90ab7d26e60e7a6bc43516a | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parse a response to the 'ismaster' command."""
import itertools
from bson.py3compat import imap
from pymongo import common
from pymongo.server_type import SERVER_TYPE
def _get_server_type(doc):
    """Determine the server type from an ismaster response."""
    if not doc.get('ok'):
        return SERVER_TYPE.Unknown
    if doc.get('isreplicaset'):
        return SERVER_TYPE.RSGhost

    if doc.get('setName'):
        # Replica-set member: the first matching role flag wins, in the same
        # precedence order as the original if/elif chain.
        role_flags = (
            ('hidden', SERVER_TYPE.RSOther),
            ('ismaster', SERVER_TYPE.RSPrimary),
            ('secondary', SERVER_TYPE.RSSecondary),
            ('arbiterOnly', SERVER_TYPE.RSArbiter),
        )
        for flag, member_type in role_flags:
            if doc.get(flag):
                return member_type
        return SERVER_TYPE.RSOther

    if doc.get('msg') == 'isdbgrid':
        return SERVER_TYPE.Mongos
    return SERVER_TYPE.Standalone
class IsMaster(object):
    """Parsed view of a server's response to the 'ismaster' command."""
    __slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable')
    def __init__(self, doc):
        """Parse an ismaster response from the server."""
        self._server_type = _get_server_type(doc)
        self._doc = doc
        # Writable: primary, standalone, or mongos.
        self._is_writable = self._server_type in (
            SERVER_TYPE.RSPrimary,
            SERVER_TYPE.Standalone,
            SERVER_TYPE.Mongos)
        # Readable: a secondary, or anything writable.
        self._is_readable = (
            self.server_type == SERVER_TYPE.RSSecondary
            or self._is_writable)
    @property
    def server_type(self):
        """The SERVER_TYPE value derived from the response."""
        return self._server_type
    @property
    def all_hosts(self):
        """List of hosts, passives, and arbiters known to this server."""
        return set(imap(common.clean_node, itertools.chain(
            self._doc.get('hosts', []),
            self._doc.get('passives', []),
            self._doc.get('arbiters', []))))
    @property
    def tags(self):
        """Replica set member tags or empty dict."""
        return self._doc.get('tags', {})
    @property
    def primary(self):
        """This server's opinion about who the primary is, or None."""
        if self._doc.get('primary'):
            return common.partition_node(self._doc['primary'])
        else:
            return None
    @property
    def replica_set_name(self):
        """Replica set name or None."""
        return self._doc.get('setName')
    @property
    def max_bson_size(self):
        """Server's maxBsonObjectSize, or the library default if absent."""
        return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE)
    @property
    def max_message_size(self):
        """Server's maxMessageSizeBytes; defaults to twice max_bson_size."""
        return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size)
    @property
    def max_write_batch_size(self):
        """Server's maxWriteBatchSize, or the library default if absent."""
        return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE)
    @property
    def min_wire_version(self):
        """Server's minWireVersion, or the library default if absent."""
        return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION)
    @property
    def max_wire_version(self):
        """Server's maxWireVersion, or the library default if absent."""
        return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION)
    @property
    def is_writable(self):
        """True if the server accepts writes (see __init__)."""
        return self._is_writable
    @property
    def is_readable(self):
        """True if the server can serve reads (see __init__)."""
        return self._is_readable
| 30.107438 | 78 | 0.653033 |
import itertools
from bson.py3compat import imap
from pymongo import common
from pymongo.server_type import SERVER_TYPE
def _get_server_type(doc):
if not doc.get('ok'):
return SERVER_TYPE.Unknown
if doc.get('isreplicaset'):
return SERVER_TYPE.RSGhost
elif doc.get('setName'):
if doc.get('hidden'):
return SERVER_TYPE.RSOther
elif doc.get('ismaster'):
return SERVER_TYPE.RSPrimary
elif doc.get('secondary'):
return SERVER_TYPE.RSSecondary
elif doc.get('arbiterOnly'):
return SERVER_TYPE.RSArbiter
else:
return SERVER_TYPE.RSOther
elif doc.get('msg') == 'isdbgrid':
return SERVER_TYPE.Mongos
else:
return SERVER_TYPE.Standalone
class IsMaster(object):
__slots__ = ('_doc', '_server_type', '_is_writable', '_is_readable')
def __init__(self, doc):
self._server_type = _get_server_type(doc)
self._doc = doc
self._is_writable = self._server_type in (
SERVER_TYPE.RSPrimary,
SERVER_TYPE.Standalone,
SERVER_TYPE.Mongos)
self._is_readable = (
self.server_type == SERVER_TYPE.RSSecondary
or self._is_writable)
@property
def server_type(self):
return self._server_type
@property
def all_hosts(self):
return set(imap(common.clean_node, itertools.chain(
self._doc.get('hosts', []),
self._doc.get('passives', []),
self._doc.get('arbiters', []))))
@property
def tags(self):
return self._doc.get('tags', {})
@property
def primary(self):
if self._doc.get('primary'):
return common.partition_node(self._doc['primary'])
else:
return None
@property
def replica_set_name(self):
return self._doc.get('setName')
@property
def max_bson_size(self):
return self._doc.get('maxBsonObjectSize', common.MAX_BSON_SIZE)
@property
def max_message_size(self):
return self._doc.get('maxMessageSizeBytes', 2 * self.max_bson_size)
@property
def max_write_batch_size(self):
return self._doc.get('maxWriteBatchSize', common.MAX_WRITE_BATCH_SIZE)
@property
def min_wire_version(self):
return self._doc.get('minWireVersion', common.MIN_WIRE_VERSION)
@property
def max_wire_version(self):
return self._doc.get('maxWireVersion', common.MAX_WIRE_VERSION)
@property
def is_writable(self):
return self._is_writable
@property
def is_readable(self):
return self._is_readable
| true | true |
1c398d11e3763438d71bc763f8054aa15e33c5a5 | 13,392 | py | Python | dask/array/tests/test_ufunc.py | Juanlu001/dask | ba29ba377ae71e5a90fa5ef5198c7d317b45c06a | [
"BSD-3-Clause"
] | 1 | 2021-11-02T18:53:23.000Z | 2021-11-02T18:53:23.000Z | dask/array/tests/test_ufunc.py | Juanlu001/dask | ba29ba377ae71e5a90fa5ef5198c7d317b45c06a | [
"BSD-3-Clause"
] | 1 | 2021-10-07T09:57:58.000Z | 2021-10-07T09:57:58.000Z | dask/array/tests/test_ufunc.py | Juanlu001/dask | ba29ba377ae71e5a90fa5ef5198c7d317b45c06a | [
"BSD-3-Clause"
] | 1 | 2022-02-26T15:15:40.000Z | 2022-02-26T15:15:40.000Z | import pickle
from functools import partial
from operator import add
import pytest
np = pytest.importorskip("numpy")
import dask.array as da
from dask.array.ufunc import da_frompyfunc
from dask.array.utils import assert_eq
from dask.base import tokenize
# Boilerplate appended to each dask ufunc docstring copied from numpy;
# test_ufunc_meta asserts it is present and strips it before comparing.
DISCLAIMER = """
This docstring was copied from numpy.{name}.
Some inconsistencies with the Dask version may exist.
"""
@pytest.mark.parametrize("name", ["log", "modf", "frexp"])
def test_ufunc_meta(name):
    """A dask ufunc's docstring is numpy's plus the copied-from disclaimer."""
    disclaimer = DISCLAIMER.format(name=name)
    skip_test = " # doctest: +SKIP"
    ufunc = getattr(da, name)
    assert ufunc.__name__ == name
    assert disclaimer in ufunc.__doc__
    # Stripping the disclaimer and doctest-skip markers recovers numpy's doc.
    assert (
        ufunc.__doc__.replace(disclaimer, "").replace(skip_test, "")
        == getattr(np, name).__doc__
    )
def test_ufunc():
    """da.log mirrors np.log's ufunc metadata and attribute surface."""
    mirrored_attrs = ("nin", "nargs", "nout", "ntypes",
                      "identity", "signature", "types")
    for attr in mirrored_attrs:
        assert getattr(da.log, attr) == getattr(np.log, attr)

    # Unknown attributes are not silently forwarded.
    with pytest.raises(AttributeError):
        da.log.not_an_attribute

    assert repr(da.log) == repr(np.log)
    for name in ("nin", "outer"):
        assert name in dir(da.log)
# Binary numpy ufuncs that dask.array mirrors; consumed by test_binary_ufunc.
# Order is preserved because it determines the parametrized test ids.
binary_ufuncs = [
    "add",
    "arctan2",
    "copysign",
    "divide",
    "equal",
    "bitwise_and",
    "bitwise_or",
    "bitwise_xor",
    "floor_divide",
    "fmax",
    "fmin",
    "fmod",
    "greater",
    "greater_equal",
    "hypot",
    "ldexp",
    "less",
    "less_equal",
    "logaddexp",
    "logaddexp2",
    "logical_and",
    "logical_or",
    "logical_xor",
    "maximum",
    "minimum",
    "mod",
    "multiply",
    "nextafter",
    "not_equal",
    "power",
    "remainder",
    "subtract",
    "true_divide",
    "float_power",
]
# Unary numpy ufuncs that dask.array mirrors; consumed by test_unary_ufunc
# ("fix" is skipped there). Order determines the parametrized test ids.
unary_ufuncs = [
    "absolute",
    "arccos",
    "arccosh",
    "arcsin",
    "arcsinh",
    "arctan",
    "arctanh",
    "bitwise_not",
    "cbrt",
    "ceil",
    "conj",
    "cos",
    "cosh",
    "deg2rad",
    "degrees",
    "exp",
    "exp2",
    "expm1",
    "fabs",
    "fix",
    "floor",
    "invert",
    "isfinite",
    "isinf",
    "isnan",
    "log",
    "log10",
    "log1p",
    "log2",
    "logical_not",
    "negative",
    "rad2deg",
    "radians",
    "reciprocal",
    "rint",
    "sign",
    "signbit",
    "sin",
    "sinh",
    "spacing",
    "sqrt",
    "square",
    "tan",
    "tanh",
    "trunc",
]
@pytest.mark.parametrize("ufunc", unary_ufuncs)
def test_unary_ufunc(ufunc):
    """Each dask unary ufunc matches its numpy counterpart on integer data."""
    if ufunc == "fix":
        pytest.skip("fix calls floor in a way that we do not yet support")
    dafunc = getattr(da, ufunc)
    npfunc = getattr(np, ufunc)

    arr = np.random.randint(1, 100, size=(20, 20))
    darr = da.from_array(arr, 3)

    # Some ufuncs warn on invalid values (arccos, arcsin, etc. for ints > 1).
    # pytest.warns(None) was deprecated in pytest 7 and removed in pytest 8,
    # so silence the expected warnings explicitly instead.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")

        # applying Dask ufunc doesn't trigger computation
        assert isinstance(dafunc(darr), da.Array)
        assert_eq(dafunc(darr), npfunc(arr), equal_nan=True)

        # applying NumPy ufunc is lazy (true ufuncs dispatch back to dask)
        if isinstance(npfunc, np.ufunc):
            assert isinstance(npfunc(darr), da.Array)
        else:
            assert isinstance(npfunc(darr), np.ndarray)
        assert_eq(npfunc(darr), npfunc(arr), equal_nan=True)

        # applying Dask ufunc to normal ndarray triggers computation
        assert isinstance(dafunc(arr), np.ndarray)
        assert_eq(dafunc(arr), npfunc(arr), equal_nan=True)
@pytest.mark.parametrize("ufunc", binary_ufuncs)
def test_binary_ufunc(ufunc):
    """Each dask binary ufunc matches numpy for array and scalar operands."""
    dafunc = getattr(da, ufunc)
    npfunc = getattr(np, ufunc)

    arr1 = np.random.randint(1, 100, size=(20, 20))
    darr1 = da.from_array(arr1, 3)

    arr2 = np.random.randint(1, 100, size=(20, 20))
    darr2 = da.from_array(arr2, 3)

    # applying Dask ufunc doesn't trigger computation
    assert isinstance(dafunc(darr1, darr2), da.Array)
    assert_eq(dafunc(darr1, darr2), npfunc(arr1, arr2))

    # applying NumPy ufunc dispatches back to dask and stays lazy
    assert isinstance(npfunc(darr1, darr2), da.Array)
    assert_eq(npfunc(darr1, darr2), npfunc(arr1, arr2))

    # applying Dask ufunc to normal ndarray triggers computation
    assert isinstance(dafunc(arr1, arr2), np.ndarray)
    assert_eq(dafunc(arr1, arr2), npfunc(arr1, arr2))

    # with scalar
    assert isinstance(dafunc(darr1, 10), da.Array)
    assert_eq(dafunc(darr1, 10), npfunc(arr1, 10))

    # ldexp can overflow for scalar-first operands. pytest.warns(None) was
    # deprecated in pytest 7 and removed in pytest 8; silence the expected
    # warnings explicitly instead.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        assert isinstance(dafunc(10, darr1), da.Array)
        assert_eq(dafunc(10, darr1), npfunc(10, arr1))

    assert isinstance(dafunc(arr1, 10), np.ndarray)
    assert_eq(dafunc(arr1, 10), npfunc(arr1, 10))

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        assert isinstance(dafunc(10, arr1), np.ndarray)
        assert_eq(dafunc(10, arr1), npfunc(10, arr1))
def test_ufunc_outer():
    """da.add.outer matches np.add.outer across dask/numpy/scalar mixes."""
    arr1 = np.random.randint(1, 100, size=20)
    darr1 = da.from_array(arr1, 3)
    arr2 = np.random.randint(1, 100, size=(10, 3))
    darr2 = da.from_array(arr2, 3)
    # Check output types
    assert isinstance(da.add.outer(darr1, darr2), da.Array)
    assert isinstance(da.add.outer(arr1, darr2), da.Array)
    assert isinstance(da.add.outer(darr1, arr2), da.Array)
    assert isinstance(da.add.outer(arr1, arr2), np.ndarray)
    # Check mix of dimensions, dtypes, and numpy/dask/object
    cases = [
        ((darr1, darr2), (arr1, arr2)),
        ((darr2, darr1), (arr2, arr1)),
        ((darr2, darr1.astype("f8")), (arr2, arr1.astype("f8"))),
        ((darr1, arr2), (arr1, arr2)),
        ((darr1, 1), (arr1, 1)),
        ((1, darr2), (1, arr2)),
        ((1.5, darr2), (1.5, arr2)),
        (([1, 2, 3], darr2), ([1, 2, 3], arr2)),
        ((darr1.sum(), darr2), (arr1.sum(), arr2)),
        ((np.array(1), darr2), (np.array(1), arr2)),
    ]
    for (dA, dB), (A, B) in cases:
        assert_eq(da.add.outer(dA, dB), np.add.outer(A, B))
    # Check dtype kwarg works
    assert_eq(
        da.add.outer(darr1, darr2, dtype="f8"), np.add.outer(arr1, arr2, dtype="f8")
    )
    # out= is rejected, and outer on a unary ufunc is rejected.
    with pytest.raises(ValueError):
        da.add.outer(darr1, darr2, out=arr1)
    with pytest.raises(ValueError):
        da.sin.outer(darr1, darr2)
@pytest.mark.parametrize("ufunc", ["isreal", "iscomplex", "real", "imag"])
def test_complex(ufunc):
    """Complex-inspection functions agree between dask and numpy."""
    dafunc = getattr(da, ufunc)
    # Note that these functions are not NumPy ufuncs
    npfunc = getattr(np, ufunc)
    real = np.random.randint(1, 100, size=(20, 20))
    imag = np.random.randint(1, 100, size=(20, 20)) * 1j
    comp = real + imag
    dareal = da.from_array(real, 3)
    daimag = da.from_array(imag, 3)
    dacomp = da.from_array(comp, 3)
    assert_eq(dacomp.real, comp.real)
    assert_eq(dacomp.imag, comp.imag)
    assert_eq(dacomp.conj(), comp.conj())
    for darr, arr in [(dacomp, comp), (dareal, real), (daimag, imag)]:
        # applying Dask ufunc doesn't trigger computation
        assert isinstance(dafunc(darr), da.Array)
        assert_eq(dafunc(darr), npfunc(arr))
        assert_eq(npfunc(darr), npfunc(arr))
        # applying Dask ufunc to normal ndarray triggers computation
        assert isinstance(dafunc(arr), np.ndarray)
        assert_eq(dafunc(arr), npfunc(arr))
@pytest.mark.parametrize("ufunc", ["frexp", "modf"])
def test_ufunc_2results(ufunc):
    """Two-output ufuncs yield a lazy pair of dask arrays in every mode."""
    dafunc = getattr(da, ufunc)
    npfunc = getattr(np, ufunc)

    arr = np.random.randint(1, 100, size=(20, 20))
    darr = da.from_array(arr, 3)
    expected_a, expected_b = npfunc(arr)

    # Dask func on dask array, NumPy func on dask array (lazy dispatch), and
    # Dask func on a plain ndarray all produce the same pair of dask arrays.
    for func, operand in ((dafunc, darr), (npfunc, darr), (dafunc, arr)):
        out_a, out_b = func(operand)
        assert isinstance(out_a, da.Array)
        assert isinstance(out_b, da.Array)
        assert_eq(out_a, expected_a)
        assert_eq(out_b, expected_b)
def test_clip():
    """Array.clip on a dask array matches ndarray.clip for every call form."""
    arr = np.random.normal(0, 10, size=(10, 10))
    darr = da.from_array(arr, chunks=(3, 4))

    # Positional forms.
    assert_eq(arr.clip(5), darr.clip(5))
    assert_eq(arr.clip(1, 5), darr.clip(1, 5))

    # Keyword forms, including the inverted max < min case.
    for kwargs in (dict(min=5), dict(max=5),
                   dict(max=1, min=5), dict(min=1, max=5)):
        assert_eq(arr.clip(**kwargs), darr.clip(**kwargs))
def test_angle():
    """da.angle matches np.angle on complex data, in radians and degrees."""
    comp = (np.random.randint(1, 100, size=(20, 20))
            + np.random.randint(1, 100, size=(20, 20)) * 1j)
    dacomp = da.from_array(comp, 3)

    assert_eq(da.angle(dacomp), np.angle(comp))
    assert_eq(da.angle(dacomp, deg=True), np.angle(comp, deg=True))

    # On a plain ndarray, da.angle computes eagerly.
    assert isinstance(da.angle(comp), np.ndarray)
    assert_eq(da.angle(comp), np.angle(comp))
def test_issignedinf():
    """isneginf/isposinf agree between dask and numpy on +/-inf and nan."""
    # Dividing {-1, 0, 1} by zero yields -inf, nan, and +inf.
    with np.errstate(invalid="ignore", divide="ignore"):
        vals = np.random.randint(-1, 2, size=(20, 20)).astype(float) / 0
    dvals = da.from_array(vals, 3)

    for np_pred, da_pred in ((np.isneginf, da.isneginf),
                             (np.isposinf, da.isposinf)):
        assert_eq(np_pred(vals), da_pred(dvals))
@pytest.mark.parametrize("func", ["i0", "sinc", "nan_to_num"])
def test_non_ufunc_others(func):
    """Element-wise helpers that are not true ufuncs still mirror numpy."""
    data = np.random.randint(1, 100, size=(20, 20))
    ddata = da.from_array(data, 3)

    dask_result = getattr(da, func)(ddata)
    numpy_result = getattr(np, func)(data)
    assert_eq(dask_result, numpy_result, equal_nan=True)
def test_frompyfunc():
    """da.frompyfunc wraps a Python callable like np.frompyfunc does."""
    dask_add = da.frompyfunc(add, 2, 1)
    numpy_add = np.frompyfunc(add, 2, 1)

    x = np.random.normal(0, 10, size=(10, 10))
    dx = da.from_array(x, chunks=(3, 4))
    y = np.random.normal(0, 10, size=10)
    dy = da.from_array(y, chunks=2)

    # Broadcasting call and outer product both match numpy.
    assert_eq(dask_add(dx, dy), numpy_add(x, y))
    assert_eq(dask_add.outer(dx, dy), numpy_add.outer(x, y))

    # Multiple outputs are not supported.
    with pytest.raises(NotImplementedError):
        da.frompyfunc(lambda a, b: (a + b, a - b), 2, 2)
def test_frompyfunc_wrapper():
    """da_frompyfunc behaves like the np.frompyfunc object it wraps."""
    f = da_frompyfunc(add, 2, 1)
    np_f = np.frompyfunc(add, 2, 1)
    x = np.array([1, 2, 3])
    # Callable
    np.testing.assert_equal(f(x, 1), np_f(x, 1))
    # picklable (np.frompyfunc results themselves are not)
    f2 = pickle.loads(pickle.dumps(f))
    np.testing.assert_equal(f2(x, 1), np_f(x, 1))
    # Attributes
    assert f.ntypes == np_f.ntypes
    with pytest.raises(AttributeError):
        f.not_an_attribute
    # Tab completion
    assert "ntypes" in dir(f)
    # Methods
    np.testing.assert_equal(f.outer(x, x), np_f.outer(x, x))
    # funcname
    assert f.__name__ == "frompyfunc-add"
    # repr
    assert repr(f) == "da.frompyfunc<add, 2, 1>"
    # tokenize is deterministic for equal wrappers
    assert tokenize(da_frompyfunc(add, 2, 1)) == tokenize(da_frompyfunc(add, 2, 1))
def test_array_ufunc():
    """NumPy functions applied to a dask array stay lazy and match numpy."""
    x = np.arange(24).reshape((4, 6))
    d = da.from_array(x, chunks=(2, 3))

    for func in (np.sin, np.sum, np.negative, partial(np.prod, axis=0)):
        result = func(d)
        assert isinstance(result, da.Array)
        assert_eq(result, func(x))
def test_array_ufunc_binop():
    """Binary numpy ufuncs and their .outer method dispatch to dask."""
    x = np.arange(25).reshape((5, 5))
    d = da.from_array(x, chunks=(2, 2))

    for func in (np.add, np.multiply):
        direct = func(d, d)
        assert isinstance(direct, da.Array)
        assert_eq(direct, func(x, x))

        outer = func.outer(d, d)
        assert isinstance(outer, da.Array)
        assert_eq(outer, func.outer(x, x))
def test_array_ufunc_out():
    """out= pointing at a dask array updates that array in place."""
    x = da.arange(10, chunks=(5,))
    np.sin(x, out=x)
    np.add(x, 10, out=x)

    expected = np.sin(np.arange(10)) + 10
    assert_eq(x, expected)
def test_unsupported_ufunc_methods():
    """Unsupported ufunc methods such as reduce raise TypeError."""
    x = da.arange(10, chunks=(5,))
    with pytest.raises(TypeError):
        np.add.reduce(x)
def test_out_numpy():
    """A numpy ndarray is rejected as out= for a dask operand."""
    x = da.arange(10, chunks=(5,))
    sink = np.empty(10, dtype=x.dtype)

    with pytest.raises((TypeError, NotImplementedError)) as info:
        np.add(x, 1, out=sink)

    # The message should mention both array types involved.
    message = str(info.value)
    assert "ndarray" in message
    assert "Array" in message
def test_out_shape_mismatch():
    """out= with a mismatched shape raises ValueError."""
    source = da.arange(10, chunks=(5,))
    target = da.arange(15, chunks=(5,))
    with pytest.raises(ValueError):
        np.log(source, out=target)
def test_divmod():
    """np.divmod and builtin divmod both work on dask arrays."""
    arr1 = np.random.randint(1, 100, size=(20, 20))
    arr2 = np.random.randint(1, 100, size=(20, 20))
    darr1 = da.from_array(arr1, 3)
    darr2 = da.from_array(arr2, 3)

    # Exercise both entry points against a scalar and an array divisor.
    for op in (np.divmod, divmod):
        for np_other, da_other in ((2.0, 2.0), (arr2, darr2)):
            quotient, remainder = op(darr1, da_other)
            expected_q, expected_r = op(arr1, np_other)
            assert_eq(quotient, expected_q)
            assert_eq(remainder, expected_r)
@pytest.mark.parametrize("dt", ["float64", "float32", "int32", "int64"])
def test_dtype_kwarg(dt):
    """The dtype= keyword is honored via both np.add and da.add."""
    arr1 = np.array([1, 2, 3])
    arr2 = np.array([4, 5, 6])
    darr1 = da.from_array(arr1)
    darr2 = da.from_array(arr2)

    expected = np.add(arr1, arr2, dtype=dt)
    for adder in (np.add, da.add):
        assert_eq(expected, adder(darr1, darr2, dtype=dt))
| 27.1643 | 85 | 0.62052 | import pickle
from functools import partial
from operator import add
import pytest
np = pytest.importorskip("numpy")
import dask.array as da
from dask.array.ufunc import da_frompyfunc
from dask.array.utils import assert_eq
from dask.base import tokenize
DISCLAIMER = """
This docstring was copied from numpy.{name}.
Some inconsistencies with the Dask version may exist.
"""
@pytest.mark.parametrize("name", ["log", "modf", "frexp"])
def test_ufunc_meta(name):
disclaimer = DISCLAIMER.format(name=name)
skip_test = " # doctest: +SKIP"
ufunc = getattr(da, name)
assert ufunc.__name__ == name
assert disclaimer in ufunc.__doc__
assert (
ufunc.__doc__.replace(disclaimer, "").replace(skip_test, "")
== getattr(np, name).__doc__
)
def test_ufunc():
for attr in ["nin", "nargs", "nout", "ntypes", "identity", "signature", "types"]:
assert getattr(da.log, attr) == getattr(np.log, attr)
with pytest.raises(AttributeError):
da.log.not_an_attribute
assert repr(da.log) == repr(np.log)
assert "nin" in dir(da.log)
assert "outer" in dir(da.log)
binary_ufuncs = [
"add",
"arctan2",
"copysign",
"divide",
"equal",
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"floor_divide",
"fmax",
"fmin",
"fmod",
"greater",
"greater_equal",
"hypot",
"ldexp",
"less",
"less_equal",
"logaddexp",
"logaddexp2",
"logical_and",
"logical_or",
"logical_xor",
"maximum",
"minimum",
"mod",
"multiply",
"nextafter",
"not_equal",
"power",
"remainder",
"subtract",
"true_divide",
"float_power",
]
unary_ufuncs = [
"absolute",
"arccos",
"arccosh",
"arcsin",
"arcsinh",
"arctan",
"arctanh",
"bitwise_not",
"cbrt",
"ceil",
"conj",
"cos",
"cosh",
"deg2rad",
"degrees",
"exp",
"exp2",
"expm1",
"fabs",
"fix",
"floor",
"invert",
"isfinite",
"isinf",
"isnan",
"log",
"log10",
"log1p",
"log2",
"logical_not",
"negative",
"rad2deg",
"radians",
"reciprocal",
"rint",
"sign",
"signbit",
"sin",
"sinh",
"spacing",
"sqrt",
"square",
"tan",
"tanh",
"trunc",
]
@pytest.mark.parametrize("ufunc", unary_ufuncs)
def test_unary_ufunc(ufunc):
if ufunc == "fix":
pytest.skip("fix calls floor in a way that we do not yet support")
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
arr = np.random.randint(1, 100, size=(20, 20))
darr = da.from_array(arr, 3)
with pytest.warns(None):
assert isinstance(dafunc(darr), da.Array)
assert_eq(dafunc(darr), npfunc(arr), equal_nan=True)
with pytest.warns(None): # some invalid values (arccos, arcsin, etc.)
# applying NumPy ufunc is lazy
if isinstance(npfunc, np.ufunc):
assert isinstance(npfunc(darr), da.Array)
else:
assert isinstance(npfunc(darr), np.ndarray)
assert_eq(npfunc(darr), npfunc(arr), equal_nan=True)
with pytest.warns(None): # some invalid values (arccos, arcsin, etc.)
# applying Dask ufunc to normal ndarray triggers computation
assert isinstance(dafunc(arr), np.ndarray)
assert_eq(dafunc(arr), npfunc(arr), equal_nan=True)
@pytest.mark.parametrize("ufunc", binary_ufuncs)
def test_binary_ufunc(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
arr1 = np.random.randint(1, 100, size=(20, 20))
darr1 = da.from_array(arr1, 3)
arr2 = np.random.randint(1, 100, size=(20, 20))
darr2 = da.from_array(arr2, 3)
# applying Dask ufunc doesn't trigger computation
assert isinstance(dafunc(darr1, darr2), da.Array)
assert_eq(dafunc(darr1, darr2), npfunc(arr1, arr2))
assert isinstance(npfunc(darr1, darr2), da.Array)
assert_eq(npfunc(darr1, darr2), npfunc(arr1, arr2))
assert isinstance(dafunc(arr1, arr2), np.ndarray)
assert_eq(dafunc(arr1, arr2), npfunc(arr1, arr2))
assert isinstance(dafunc(darr1, 10), da.Array)
assert_eq(dafunc(darr1, 10), npfunc(arr1, 10))
with pytest.warns(None):
assert isinstance(dafunc(10, darr1), da.Array)
assert_eq(dafunc(10, darr1), npfunc(10, arr1))
assert isinstance(dafunc(arr1, 10), np.ndarray)
assert_eq(dafunc(arr1, 10), npfunc(arr1, 10))
with pytest.warns(None):
assert isinstance(dafunc(10, arr1), np.ndarray)
assert_eq(dafunc(10, arr1), npfunc(10, arr1))
def test_ufunc_outer():
arr1 = np.random.randint(1, 100, size=20)
darr1 = da.from_array(arr1, 3)
arr2 = np.random.randint(1, 100, size=(10, 3))
darr2 = da.from_array(arr2, 3)
assert isinstance(da.add.outer(darr1, darr2), da.Array)
assert isinstance(da.add.outer(arr1, darr2), da.Array)
assert isinstance(da.add.outer(darr1, arr2), da.Array)
assert isinstance(da.add.outer(arr1, arr2), np.ndarray)
cases = [
((darr1, darr2), (arr1, arr2)),
((darr2, darr1), (arr2, arr1)),
((darr2, darr1.astype("f8")), (arr2, arr1.astype("f8"))),
((darr1, arr2), (arr1, arr2)),
((darr1, 1), (arr1, 1)),
((1, darr2), (1, arr2)),
((1.5, darr2), (1.5, arr2)),
(([1, 2, 3], darr2), ([1, 2, 3], arr2)),
((darr1.sum(), darr2), (arr1.sum(), arr2)),
((np.array(1), darr2), (np.array(1), arr2)),
]
for (dA, dB), (A, B) in cases:
assert_eq(da.add.outer(dA, dB), np.add.outer(A, B))
assert_eq(
da.add.outer(darr1, darr2, dtype="f8"), np.add.outer(arr1, arr2, dtype="f8")
)
with pytest.raises(ValueError):
da.add.outer(darr1, darr2, out=arr1)
with pytest.raises(ValueError):
da.sin.outer(darr1, darr2)
@pytest.mark.parametrize("ufunc", ["isreal", "iscomplex", "real", "imag"])
def test_complex(ufunc):
dafunc = getattr(da, ufunc)
npfunc = getattr(np, ufunc)
real = np.random.randint(1, 100, size=(20, 20))
imag = np.random.randint(1, 100, size=(20, 20)) * 1j
comp = real + imag
dareal = da.from_array(real, 3)
daimag = da.from_array(imag, 3)
dacomp = da.from_array(comp, 3)
assert_eq(dacomp.real, comp.real)
assert_eq(dacomp.imag, comp.imag)
assert_eq(dacomp.conj(), comp.conj())
for darr, arr in [(dacomp, comp), (dareal, real), (daimag, imag)]:
assert isinstance(dafunc(darr), da.Array)
assert_eq(dafunc(darr), npfunc(arr))
assert_eq(npfunc(darr), npfunc(arr))
# applying Dask ufunc to normal ndarray triggers computation
assert isinstance(dafunc(arr), np.ndarray)
assert_eq(dafunc(arr), npfunc(arr))
@pytest.mark.parametrize("ufunc", ["frexp", "modf"])
def test_ufunc_2results(ufunc):
    """Two-output ufuncs return a pair of lazy dask Arrays matching numpy.

    The original spelled out three near-identical blocks (dask func on dask
    array, numpy func on dask array, dask func on numpy array); loop over
    the combinations instead.
    """
    dafunc = getattr(da, ufunc)
    npfunc = getattr(np, ufunc)

    arr = np.random.randint(1, 100, size=(20, 20))
    darr = da.from_array(arr, 3)

    # Reference results computed once with numpy.
    exp1, exp2 = npfunc(arr)

    # All three dispatch paths must produce two lazy arrays with the same
    # values as the numpy reference.
    for func, operand in [(dafunc, darr), (npfunc, darr), (dafunc, arr)]:
        res1, res2 = func(operand)
        assert isinstance(res1, da.Array)
        assert isinstance(res2, da.Array)
        assert_eq(res1, exp1)
        assert_eq(res2, exp2)
def test_clip():
    """``Array.clip`` forwards positional and keyword bounds like numpy."""
    x = np.random.normal(0, 10, size=(10, 10))
    d = da.from_array(x, chunks=(3, 4))

    # Every positional/keyword combination the numpy method accepts.
    argsets = [
        ((5,), {}),
        ((1, 5), {}),
        ((), {"min": 5}),
        ((), {"max": 5}),
        ((), {"max": 1, "min": 5}),
        ((), {"min": 1, "max": 5}),
    ]
    for args, kwargs in argsets:
        assert_eq(x.clip(*args, **kwargs), d.clip(*args, **kwargs))
def test_angle():
    """``da.angle`` matches numpy in radians and degrees; eager on ndarray input."""
    real = np.random.randint(1, 100, size=(20, 20))
    imag = np.random.randint(1, 100, size=(20, 20)) * 1j
    comp = real + imag
    dacomp = da.from_array(comp, 3)

    assert_eq(da.angle(dacomp), np.angle(comp))
    assert_eq(da.angle(dacomp, deg=True), np.angle(comp, deg=True))

    # Applying the dask version to a plain ndarray computes eagerly.
    result = da.angle(comp)
    assert isinstance(result, np.ndarray)
    assert_eq(result, np.angle(comp))
def test_issignedinf():
    """``isneginf``/``isposinf`` agree with numpy on -inf, nan and +inf values."""
    with np.errstate(invalid="ignore", divide="ignore"):
        # Dividing -1/0/1 by zero yields -inf, nan and +inf respectively.
        arr = np.random.randint(-1, 2, size=(20, 20)).astype(float) / 0
    darr = da.from_array(arr, 3)

    for np_func, da_func in [(np.isneginf, da.isneginf), (np.isposinf, da.isposinf)]:
        assert_eq(np_func(arr), da_func(darr))
@pytest.mark.parametrize("func", ["i0", "sinc", "nan_to_num"])
def test_non_ufunc_others(func):
    """Non-ufunc numpy functions wrapped by dask match numpy (nan-safe compare)."""
    arr = np.random.randint(1, 100, size=(20, 20))
    darr = da.from_array(arr, 3)
    # equal_nan=True because some of these functions can emit NaNs.
    assert_eq(getattr(da, func)(darr), getattr(np, func)(arr), equal_nan=True)
def test_frompyfunc():
    """``da.frompyfunc`` mirrors ``np.frompyfunc`` for elementwise and outer calls."""
    myadd = da.frompyfunc(add, 2, 1)
    np_myadd = np.frompyfunc(add, 2, 1)

    x = np.random.normal(0, 10, size=(10, 10))
    dx = da.from_array(x, chunks=(3, 4))
    y = np.random.normal(0, 10, size=10)
    dy = da.from_array(y, chunks=2)

    assert_eq(myadd(dx, dy), np_myadd(x, y))
    assert_eq(myadd.outer(dx, dy), np_myadd.outer(x, y))

    # Multiple outputs are not supported by the dask wrapper.
    with pytest.raises(NotImplementedError):
        da.frompyfunc(lambda a, b: (a + b, a - b), 2, 2)
def test_frompyfunc_wrapper():
    """Exercise the ``da_frompyfunc`` wrapper object itself.

    Covers call/outer forwarding, pickle round-tripping, attribute proxying
    (``ntypes``, ``dir``), the synthesized ``__name__``/``repr`` and that
    tokenization of equal wrappers is deterministic.
    """
    f = da_frompyfunc(add, 2, 1)
    np_f = np.frompyfunc(add, 2, 1)
    x = np.array([1, 2, 3])
    # Element-wise call matches numpy's frompyfunc.
    np.testing.assert_equal(f(x, 1), np_f(x, 1))
    # The wrapper must survive a pickle round trip.
    f2 = pickle.loads(pickle.dumps(f))
    np.testing.assert_equal(f2(x, 1), np_f(x, 1))
    # Attribute access is proxied to the underlying ufunc ...
    assert f.ntypes == np_f.ntypes
    with pytest.raises(AttributeError):
        f.not_an_attribute
    # ... and proxied attributes show up in dir().
    assert "ntypes" in dir(f)
    # `.outer` works through the wrapper too.
    np.testing.assert_equal(f.outer(x, x), np_f.outer(x, x))
    assert f.__name__ == "frompyfunc-add"
    assert repr(f) == "da.frompyfunc<add, 2, 1>"
    # Equal wrappers tokenize identically (deterministic hashing).
    assert tokenize(da_frompyfunc(add, 2, 1)) == tokenize(da_frompyfunc(add, 2, 1))
def test_array_ufunc():
    """``__array_ufunc__`` keeps unary/reduction numpy calls lazy on dask arrays."""
    x = np.arange(24).reshape((4, 6))
    d = da.from_array(x, chunks=(2, 3))

    for func in (np.sin, np.sum, np.negative, partial(np.prod, axis=0)):
        result = func(d)
        assert isinstance(result, da.Array)
        assert_eq(result, func(x))
def test_array_ufunc_binop():
    """Binary numpy ufuncs and their ``.outer`` dispatch to dask lazily."""
    x = np.arange(25).reshape((5, 5))
    d = da.from_array(x, chunks=(2, 2))

    for func in (np.add, np.multiply):
        lazy = func(d, d)
        assert isinstance(lazy, da.Array)
        assert_eq(lazy, func(x, x))

        lazy_outer = func.outer(d, d)
        assert isinstance(lazy_outer, da.Array)
        assert_eq(lazy_outer, func.outer(x, x))
def test_array_ufunc_out():
    """numpy ufuncs with ``out=`` pointing at a dask array update it in place."""
    x = da.arange(10, chunks=(5,))
    # Each call rebinds the graph behind ``x``; statement order matters here.
    np.sin(x, out=x)
    np.add(x, 10, out=x)
    assert_eq(x, np.sin(np.arange(10)) + 10)
def test_unsupported_ufunc_methods():
    """Unsupported ufunc methods such as ``reduce`` raise TypeError on dask arrays."""
    x = da.arange(10, chunks=(5,))
    with pytest.raises(TypeError):
        # The raise is the check: the redundant ``assert`` that previously
        # wrapped this call never executed its comparison (the call raises
        # first) and would have broken the test had the call returned falsy.
        np.add.reduce(x)
def test_out_numpy():
    """A numpy ``out=`` target for a dask operand raises with a helpful message."""
    x = da.arange(10, chunks=(5,))
    target = np.empty(10, dtype=x.dtype)

    with pytest.raises((TypeError, NotImplementedError)) as info:
        np.add(x, 1, out=target)

    # The error message should mention both array types involved.
    message = str(info.value)
    assert "ndarray" in message
    assert "Array" in message
def test_out_shape_mismatch():
    """A shape-mismatched dask ``out=`` target raises ValueError."""
    x = da.arange(10, chunks=(5,))
    y = da.arange(15, chunks=(5,))
    with pytest.raises(ValueError):
        # The raise is the check; the redundant ``assert`` that used to wrap
        # this call was removed (it could never run its comparison).
        np.log(x, out=y)
def test_divmod():
    """``np.divmod`` and builtin ``divmod`` match numpy for scalar and array divisors.

    The original repeated the same four-line block four times; iterate over
    the (operation, divisor) combinations instead, in the original order.
    """
    arr1 = np.random.randint(1, 100, size=(20, 20))
    arr2 = np.random.randint(1, 100, size=(20, 20))
    darr1 = da.from_array(arr1, 3)
    darr2 = da.from_array(arr2, 3)

    for op in (np.divmod, divmod):
        for dask_other, np_other in ((2.0, 2.0), (darr2, arr2)):
            quotient, remainder = op(darr1, dask_other)
            exp_quotient, exp_remainder = op(arr1, np_other)
            assert_eq(quotient, exp_quotient)
            assert_eq(remainder, exp_remainder)
@pytest.mark.parametrize("dt", ["float64", "float32", "int32", "int64"])
def test_dtype_kwarg(dt):
    """The ``dtype=`` keyword is honoured whether dispatched via numpy or dask."""
    arr1 = np.array([1, 2, 3])
    arr2 = np.array([4, 5, 6])
    darr1 = da.from_array(arr1)
    darr2 = da.from_array(arr2)

    expected = np.add(arr1, arr2, dtype=dt)
    for add_func in (np.add, da.add):
        assert_eq(expected, add_func(darr1, darr2, dtype=dt))
| true | true |
1c398f576460ce9f02cb2595cad614379587e215 | 419 | py | Python | quantifiedcode/plugins/example/backend/tasks/test.py | marcinguy/scanmycode-ce | 4a5fa1e5e9c8450f5c84164f9fa1524115f65c8b | [
"BSD-3-Clause"
] | 138 | 2022-02-02T15:38:29.000Z | 2022-03-30T21:23:33.000Z | quantifiedcode/plugins/example/backend/tasks/test.py | bbbfkl/scanmycode-ce | 786ae9a83a0839b70ac773a673a3ac69a0484ee4 | [
"BSD-3-Clause"
] | 4 | 2022-02-07T04:51:55.000Z | 2022-03-31T03:40:10.000Z | quantifiedcode/plugins/example/backend/tasks/test.py | bbbfkl/scanmycode-ce | 786ae9a83a0839b70ac773a673a3ac69a0484ee4 | [
"BSD-3-Clause"
] | 16 | 2022-01-31T14:48:18.000Z | 2022-03-28T07:12:04.000Z | """
Contains tasks and helper functions to send notifications.
"""
import logging
import requests
import json
from quantifiedcode.settings import settings
from quantifiedcode.backend.worker import celery
logger = logging.getLogger(__name__)
@celery.task(time_limit=120, queue="email", ignore_result=False)
def test(webhook, template, template_context=None):
    """
    Example task.

    Placeholder Celery task registered on the ``email`` queue with a 120 s
    time limit; it accepts a webhook and a template (plus optional template
    context) but performs no work yet.
    """
    pass
| 16.115385 | 64 | 0.735084 |
import logging
import requests
import json
from quantifiedcode.settings import settings
from quantifiedcode.backend.worker import celery
logger = logging.getLogger(__name__)
@celery.task(time_limit=120, queue="email", ignore_result=False)
def test(webhook, template, template_context=None):
pass
| true | true |
1c39908d91c9003fdc6a0c1eba0de122ac737542 | 4,954 | py | Python | sam/utils/creds.py | stephanpieterse/sam-iam | 7c7c1a8fe8577f63fc6b91f7b8b1dbbaa88cf7a9 | [
"MIT"
] | null | null | null | sam/utils/creds.py | stephanpieterse/sam-iam | 7c7c1a8fe8577f63fc6b91f7b8b1dbbaa88cf7a9 | [
"MIT"
] | null | null | null | sam/utils/creds.py | stephanpieterse/sam-iam | 7c7c1a8fe8577f63fc6b91f7b8b1dbbaa88cf7a9 | [
"MIT"
] | 1 | 2021-08-06T07:48:46.000Z | 2021-08-06T07:48:46.000Z |
class AWSCreds:
    """Plain container for one set of temporary AWS credentials."""

    def __init__(self, access_key, secret_key, session) -> None:
        """Store the access key id, secret access key and session token."""
        super().__init__()
        self.access_key = access_key
        self.secret_key = secret_key
        self.session = session
def __get_act_from_arn(inp):
    """Return the AWS account id, i.e. the fifth ':'-separated field of an ARN."""
    # ARN layout: arn:partition:service:region:account-id:resource
    fields = inp.split(":")
    return fields[4]
def __click_output(debug, echo_env, response, region):
    """Pretty-print assumed-role credentials using click.

    With ``debug`` true, dumps a ready-to-paste AWS credentials-file section
    plus the matching environment-variable exports; with ``echo_env`` true,
    prints just the export lines framed by blue separators.  ``response`` is
    an STS response dict (only ``response['Credentials']`` is read here).
    """
    import click
    if debug is True:
        # Push earlier terminal output out of view before the dump.
        click.secho('\n' * 10)
        click.secho("=" * 30)
        click.secho("#AWS CONFIG CREDENTIALS FILE")
        click.secho("=" * 30)
        click.secho('[tempaccount]')
        click.secho('aws_access_key_id = {}'.format(response['Credentials']['AccessKeyId']))
        click.secho('aws_secret_access_key = {}'.format(response['Credentials']['SecretAccessKey']))
        click.secho('aws_session_token = {}'.format(response['Credentials']['SessionToken']))
        click.secho('region = {}'.format(region))
        click.secho()
        click.secho("=" * 30)
        click.secho("#AWS ENV VARIABLES")
        click.secho("=" * 30)
        click.secho('export AWS_ACCESS_KEY_ID={}'.format(response['Credentials']['AccessKeyId']))
        click.secho('export AWS_SECRET_ACCESS_KEY={}'.format(response['Credentials']['SecretAccessKey']))
        click.secho('export AWS_SESSION_TOKEN={}'.format(response['Credentials']['SessionToken']))
        click.secho('export AWS_DEFAULT_REGION={}'.format(region))
    if echo_env is True:
        # Compact export-only form, visually separated in blue.
        click.secho('=' * 100, fg='blue')
        click.secho('export AWS_ACCESS_KEY_ID={}'.format(response['Credentials']['AccessKeyId']))
        click.secho('export AWS_SECRET_ACCESS_KEY={}'.format(response['Credentials']['SecretAccessKey']))
        click.secho('export AWS_SESSION_TOKEN={}'.format(response['Credentials']['SessionToken']))
        click.secho('export AWS_DEFAULT_REGION={}'.format(region))
        click.secho('=' * 100, fg='blue')
def __console_output(debug, echo_env, response, region):
    """Plain-``print`` twin of ``__click_output`` for non-CLI callers.

    Emits the exact same credentials-file section and environment-variable
    exports, just without click's colouring.  ``response`` is an STS
    response dict (only ``response['Credentials']`` is read here).
    """
    if debug is True:
        # Push earlier terminal output out of view before the dump.
        print('\n' * 10)
        print("=" * 30)
        print("#AWS CONFIG CREDENTIALS FILE")
        print("=" * 30)
        print('[tempaccount]')
        print('aws_access_key_id = {}'.format(response['Credentials']['AccessKeyId']))
        print('aws_secret_access_key = {}'.format(response['Credentials']['SecretAccessKey']))
        print('aws_session_token = {}'.format(response['Credentials']['SessionToken']))
        print('region = {}'.format(region))
        print()
        print("=" * 30)
        print("#AWS ENV VARIABLES")
        print("=" * 30)
        print('export AWS_ACCESS_KEY_ID={}'.format(response['Credentials']['AccessKeyId']))
        print('export AWS_SECRET_ACCESS_KEY={}'.format(response['Credentials']['SecretAccessKey']))
        print('export AWS_SESSION_TOKEN={}'.format(response['Credentials']['SessionToken']))
        print('export AWS_DEFAULT_REGION={}'.format(region))
    if echo_env is True:
        # Compact export-only form between separator rules.
        print('=' * 100)
        print('export AWS_ACCESS_KEY_ID={}'.format(response['Credentials']['AccessKeyId']))
        print('export AWS_SECRET_ACCESS_KEY={}'.format(response['Credentials']['SecretAccessKey']))
        print('export AWS_SESSION_TOKEN={}'.format(response['Credentials']['SessionToken']))
        print('export AWS_DEFAULT_REGION={}'.format(region))
        print('=' * 100)
def __get_provider(arn1, arn2):
    """Of the two ARNs, return the one that is the SAML identity-provider ARN."""
    return arn1 if 'saml-provider' in arn1 else arn2
def __get_role(arn1, arn2):
    """Of the two ARNs, return the one that is the IAM role ARN."""
    return arn1 if 'role' in arn1 else arn2
def __get_saml_roles_providers_from_saml(saml):
    """Map role ARNs to identity-provider ARNs found in a SAML assertion.

    ``saml`` is the base64-encoded assertion.  Every ``arn:aws:iam::...``
    occurrence is extracted; ARNs arrive in (role, provider) pairs in either
    order, so consecutive ARNs are paired up and classified by
    ``__get_role``/``__get_provider``.  Returns ``{role_arn: provider_arn}``.
    """
    import base64

    iam_part_arn = 'arn:aws:iam::'
    decodedstr = base64.b64decode(saml).decode("utf-8")
    arns = []
    for line in decodedstr.split("\n"):
        if iam_part_arn not in line:
            continue
        for part in line.split(iam_part_arn)[1:]:
            # The ARN ends at the next XML tag; strip the role/provider
            # separator comma embedded in the attribute value.
            arn = part.split("<")[0].replace(",", "")
            arns.append(iam_part_arn + arn)

    # zip(it, it) walks consecutive pairs; unlike the previous next(it)-based
    # pairing it drops a dangling unpaired ARN instead of raising
    # StopIteration out of the loop on malformed input.
    it = iter(arns)
    role_provider_dict = {}
    for arn1, arn2 in zip(it, it):
        role_provider_dict[__get_role(arn1, arn2)] = __get_provider(arn1, arn2)
    return role_provider_dict
def get_creds_via_saml_request(role, saml, debug, echo_env, region, ttl, cli=True):
    """Assume ``role`` via STS ``assume_role_with_saml`` and return AWSCreds.

    ``saml`` is the base64-encoded SAML assertion; the matching principal
    (identity-provider) ARN is looked up from the assertion itself.  ``ttl``
    is the session duration in seconds (coerced with ``int()``).  The
    resulting credentials are echoed via click when ``cli`` is true,
    otherwise via plain ``print`` (``debug``/``echo_env`` control verbosity).
    """
    import boto3
    client = boto3.client("sts", region_name=region)
    # The provider ARN paired with the requested role inside the assertion.
    role_provider_dict = __get_saml_roles_providers_from_saml(saml)
    principal_arn = role_provider_dict[role]
    response = client.assume_role_with_saml(
        RoleArn=role,
        PrincipalArn=principal_arn,
        SAMLAssertion=saml,
        DurationSeconds=int(ttl)
    )
    if cli is True:
        __click_output(debug, echo_env, response, region)
    else:
        __console_output(debug, echo_env, response, region)
    # AWSCreds(access_key, secret_key, session) -- order matches the class.
    return AWSCreds(response['Credentials']['AccessKeyId'],
                    response['Credentials']['SecretAccessKey'],
                    response['Credentials']['SessionToken'])
| 38.703125 | 105 | 0.633024 |
class AWSCreds:
def __init__(self, access_key, secret_key, session) -> None:
super().__init__()
self.session = session
self.secret_key = secret_key
self.access_key = access_key
def __get_act_from_arn(inp):
return inp.split(":")[4]
def __click_output(debug, echo_env, response, region):
import click
if debug is True:
click.secho('\n' * 10)
click.secho("=" * 30)
click.secho("#AWS CONFIG CREDENTIALS FILE")
click.secho("=" * 30)
click.secho('[tempaccount]')
click.secho('aws_access_key_id = {}'.format(response['Credentials']['AccessKeyId']))
click.secho('aws_secret_access_key = {}'.format(response['Credentials']['SecretAccessKey']))
click.secho('aws_session_token = {}'.format(response['Credentials']['SessionToken']))
click.secho('region = {}'.format(region))
click.secho()
click.secho("=" * 30)
click.secho("#AWS ENV VARIABLES")
click.secho("=" * 30)
click.secho('export AWS_ACCESS_KEY_ID={}'.format(response['Credentials']['AccessKeyId']))
click.secho('export AWS_SECRET_ACCESS_KEY={}'.format(response['Credentials']['SecretAccessKey']))
click.secho('export AWS_SESSION_TOKEN={}'.format(response['Credentials']['SessionToken']))
click.secho('export AWS_DEFAULT_REGION={}'.format(region))
if echo_env is True:
click.secho('=' * 100, fg='blue')
click.secho('export AWS_ACCESS_KEY_ID={}'.format(response['Credentials']['AccessKeyId']))
click.secho('export AWS_SECRET_ACCESS_KEY={}'.format(response['Credentials']['SecretAccessKey']))
click.secho('export AWS_SESSION_TOKEN={}'.format(response['Credentials']['SessionToken']))
click.secho('export AWS_DEFAULT_REGION={}'.format(region))
click.secho('=' * 100, fg='blue')
def __console_output(debug, echo_env, response, region):
if debug is True:
print('\n' * 10)
print("=" * 30)
print("#AWS CONFIG CREDENTIALS FILE")
print("=" * 30)
print('[tempaccount]')
print('aws_access_key_id = {}'.format(response['Credentials']['AccessKeyId']))
print('aws_secret_access_key = {}'.format(response['Credentials']['SecretAccessKey']))
print('aws_session_token = {}'.format(response['Credentials']['SessionToken']))
print('region = {}'.format(region))
print()
print("=" * 30)
print("#AWS ENV VARIABLES")
print("=" * 30)
print('export AWS_ACCESS_KEY_ID={}'.format(response['Credentials']['AccessKeyId']))
print('export AWS_SECRET_ACCESS_KEY={}'.format(response['Credentials']['SecretAccessKey']))
print('export AWS_SESSION_TOKEN={}'.format(response['Credentials']['SessionToken']))
print('export AWS_DEFAULT_REGION={}'.format(region))
if echo_env is True:
print('=' * 100)
print('export AWS_ACCESS_KEY_ID={}'.format(response['Credentials']['AccessKeyId']))
print('export AWS_SECRET_ACCESS_KEY={}'.format(response['Credentials']['SecretAccessKey']))
print('export AWS_SESSION_TOKEN={}'.format(response['Credentials']['SessionToken']))
print('export AWS_DEFAULT_REGION={}'.format(region))
print('=' * 100)
def __get_provider(arn1, arn2):
if 'saml-provider' in arn1:
return arn1
return arn2
def __get_role(arn1, arn2):
if 'role' in arn1:
return arn1
return arn2
def __get_saml_roles_providers_from_saml(saml):
import base64
iam_part_arn = 'arn:aws:iam::'
decodedstr = base64.b64decode(saml).decode("utf-8")
arns = []
for line in decodedstr.split("\n"):
if iam_part_arn in line:
parts = line.split(iam_part_arn)
for part in parts[1:]:
arn = part.split("<")[0].replace(",", "")
arns.append(iam_part_arn + arn)
role_provider_dict = {}
it = iter(arns)
for x in it:
arn1 = x
arn2 = next(it)
role_provider_dict[__get_role(arn1, arn2)] = __get_provider(arn1, arn2)
return role_provider_dict
def get_creds_via_saml_request(role, saml, debug, echo_env, region, ttl, cli=True):
import boto3
client = boto3.client("sts", region_name=region)
role_provider_dict = __get_saml_roles_providers_from_saml(saml)
principal_arn = role_provider_dict[role]
response = client.assume_role_with_saml(
RoleArn=role,
PrincipalArn=principal_arn,
SAMLAssertion=saml,
DurationSeconds=int(ttl)
)
if cli is True:
__click_output(debug, echo_env, response, region)
else:
__console_output(debug, echo_env, response, region)
return AWSCreds(response['Credentials']['AccessKeyId'],
response['Credentials']['SecretAccessKey'],
response['Credentials']['SessionToken'])
| true | true |
1c3990bf204221ef25520cc6750736b1380d5478 | 2,551 | py | Python | tests/test_rsvd.py | eldrin/aarms | bdd5455ac8dcfc1fe91a12fdd132b74e6c37609d | [
"MIT"
] | null | null | null | tests/test_rsvd.py | eldrin/aarms | bdd5455ac8dcfc1fe91a12fdd132b74e6c37609d | [
"MIT"
] | 3 | 2020-11-05T08:44:46.000Z | 2020-11-10T17:25:15.000Z | tests/test_rsvd.py | eldrin/aarms | bdd5455ac8dcfc1fe91a12fdd132b74e6c37609d | [
"MIT"
] | null | null | null | import unittest
import os
os.environ['NUMBA_NUM_THREADS'] = '1'
import numpy as np
from scipy import sparse as sp
from aarms.models.rsvd import RSVD, RSVDSPPMI
from aarms.models.transform import sppmi
from base_test import TestAARMS
class TestRSVD(TestAARMS):
"""
"""
def test_rsvd_factorize(self):
"""
This test function refers a lot from::
https://github.com/benfred/implicit/blob/master/tests/als_test.py
"""
X = sp.csr_matrix([[1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[1, 0, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1]])
cases = [dtype for dtype in (np.float32, np.float64)]
for dtype in cases:
try:
# Truncated SVD does not accept the full rank k (should be smaller)
svd = RSVD(k = 6, dtype = dtype)
svd.fit(X)
except Exception as e:
self.fail(msg = "failed for basic user-item factorization: "
f"{e}, dtype={dtype}, ")
Xhat = svd.embeddings_['user'] @ svd.embeddings_['item'].T
self._compare_recon(X, Xhat, thresh=3e-1, **{'dtype': dtype})
def test_rsvdsppmi_factorize(self):
"""
This test function refers a lot from::
https://github.com/benfred/implicit/blob/master/tests/als_test.py
"""
X = sp.csr_matrix([[1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[1, 0, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1]])
cases = [dtype for dtype in (np.float32, np.float64)]
for dtype in cases:
try:
svd = RSVDSPPMI(k = 6, dtype = dtype)
svd.fit(X)
except Exception as e:
self.fail(msg = "failed for basic user-item factorization: "
f"{e}, dtype={dtype}, ")
Xhat = svd.embeddings_['user'] @ svd.embeddings_['item'].T
user_item_sppmi = sppmi(X, svd.kappa)
self._compare_recon(user_item_sppmi, Xhat,
thresh=1e-3, **{'dtype': dtype})
if __name__ == "__main__":
unittest.main()
| 33.12987 | 83 | 0.454332 | import unittest
import os
os.environ['NUMBA_NUM_THREADS'] = '1'
import numpy as np
from scipy import sparse as sp
from aarms.models.rsvd import RSVD, RSVDSPPMI
from aarms.models.transform import sppmi
from base_test import TestAARMS
class TestRSVD(TestAARMS):
def test_rsvd_factorize(self):
X = sp.csr_matrix([[1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[1, 0, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1]])
cases = [dtype for dtype in (np.float32, np.float64)]
for dtype in cases:
try:
svd = RSVD(k = 6, dtype = dtype)
svd.fit(X)
except Exception as e:
self.fail(msg = "failed for basic user-item factorization: "
f"{e}, dtype={dtype}, ")
Xhat = svd.embeddings_['user'] @ svd.embeddings_['item'].T
self._compare_recon(X, Xhat, thresh=3e-1, **{'dtype': dtype})
def test_rsvdsppmi_factorize(self):
X = sp.csr_matrix([[1, 1, 0, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[1, 0, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 1]])
cases = [dtype for dtype in (np.float32, np.float64)]
for dtype in cases:
try:
svd = RSVDSPPMI(k = 6, dtype = dtype)
svd.fit(X)
except Exception as e:
self.fail(msg = "failed for basic user-item factorization: "
f"{e}, dtype={dtype}, ")
Xhat = svd.embeddings_['user'] @ svd.embeddings_['item'].T
user_item_sppmi = sppmi(X, svd.kappa)
self._compare_recon(user_item_sppmi, Xhat,
thresh=1e-3, **{'dtype': dtype})
if __name__ == "__main__":
unittest.main()
| true | true |
1c3990d6a79fb17cb6131442f7acbe06d709de7e | 2,798 | py | Python | picoCTF-web/tests/integration/test_teams.py | minhnq1618/picoCTF | f634f0e55be6b1a8552a33e4f94e7487142e8bce | [
"MIT"
] | 280 | 2016-03-23T05:16:07.000Z | 2022-03-25T10:45:33.000Z | picoCTF-web/tests/integration/test_teams.py | minhnq1618/picoCTF | f634f0e55be6b1a8552a33e4f94e7487142e8bce | [
"MIT"
] | 384 | 2016-03-22T05:14:47.000Z | 2021-09-13T23:46:14.000Z | picoCTF-web/tests/integration/test_teams.py | minhnq1618/picoCTF | f634f0e55be6b1a8552a33e4f94e7487142e8bce | [
"MIT"
] | 142 | 2016-03-15T16:27:21.000Z | 2022-02-23T23:41:28.000Z | """Tests for the /api/v1/teams routes."""
from pytest_mongo import factories
from pytest_redis import factories
from .common import ( # noqa (fixture)
ADMIN_DEMOGRAPHICS,
clear_db,
client,
decode_response,
get_csrf_token,
register_test_accounts,
TEACHER_DEMOGRAPHICS,
STUDENT_DEMOGRAPHICS,
get_conn,
)
def test_create_team(mongo_proc, redis_proc, client): # noqa (fixture)
    """Tests the POST /teams endpoint.

    Walks through the team-creation rules in order: teachers may not create
    teams, team names may not collide with existing user or team names, a
    student creating a team is moved onto it (their previous team drops to
    size 0), and a user may create at most one new team.  Steps share
    database state, so their order matters.
    """
    clear_db()
    register_test_accounts()
    # Attempt to create a new team as a teacher
    client.post(
        "/api/v1/user/login",
        json={
            "username": TEACHER_DEMOGRAPHICS["username"],
            "password": TEACHER_DEMOGRAPHICS["password"],
        },
    )
    res = client.post(
        "/api/v1/teams", json={"team_name": "newteam", "team_password": "newteam"}
    )
    assert res.status_code == 403
    assert res.json["message"] == "Teachers may not create teams"
    client.get("/api/v1/user/logout")
    # Attempt to create a team with a name previously used by a user
    client.post(
        "/api/v1/user/login",
        json={
            "username": STUDENT_DEMOGRAPHICS["username"],
            "password": STUDENT_DEMOGRAPHICS["password"],
        },
    )
    res = client.post(
        "/api/v1/teams",
        json={"team_name": ADMIN_DEMOGRAPHICS["username"], "team_password": "newteam"},
    )
    assert res.status_code == 409
    assert res.json["message"] == "There is already a user with this name."
    # Add a mock team and attempt to create a team with the same name
    db = get_conn()
    db.teams.insert({"team_name": "test teamname"})
    res = client.post(
        "/api/v1/teams", json={"team_name": "test teamname", "team_password": "newteam"}
    )
    assert res.status_code == 409
    assert res.json["message"] == "There is already a team with this name."
    # Create and join a team
    res = client.post(
        "/api/v1/teams", json={"team_name": "newteam", "team_password": "newteam"}
    )
    assert res.status_code == 201
    assert res.json["success"] is True
    new_tid = res.json["tid"]
    # Check that membership has been transferred
    # (presumably registration created a solo team named after the student,
    # which now drops to size 0 -- confirm against registration code)
    user = db.users.find_one({"username": STUDENT_DEMOGRAPHICS["username"]})
    old_team = db.teams.find_one({"team_name": STUDENT_DEMOGRAPHICS["username"]})
    new_team = db.teams.find_one({"tid": new_tid})
    assert user["tid"] == new_tid
    assert old_team["size"] == 0
    assert new_team["size"] == 1
    # Attempt to create another team as the same user
    res = client.post(
        "/api/v1/teams", json={"team_name": "newteam2", "team_password": "newteam2"}
    )
    assert res.status_code == 422
    assert (
        res.json["message"] == "You can only create one new team per " + "user account!"
    )
| 32.534884 | 88 | 0.630808 | from pytest_mongo import factories
from pytest_redis import factories
from .common import (
ADMIN_DEMOGRAPHICS,
clear_db,
client,
decode_response,
get_csrf_token,
register_test_accounts,
TEACHER_DEMOGRAPHICS,
STUDENT_DEMOGRAPHICS,
get_conn,
)
def test_create_team(mongo_proc, redis_proc, client):
clear_db()
register_test_accounts()
client.post(
"/api/v1/user/login",
json={
"username": TEACHER_DEMOGRAPHICS["username"],
"password": TEACHER_DEMOGRAPHICS["password"],
},
)
res = client.post(
"/api/v1/teams", json={"team_name": "newteam", "team_password": "newteam"}
)
assert res.status_code == 403
assert res.json["message"] == "Teachers may not create teams"
client.get("/api/v1/user/logout")
client.post(
"/api/v1/user/login",
json={
"username": STUDENT_DEMOGRAPHICS["username"],
"password": STUDENT_DEMOGRAPHICS["password"],
},
)
res = client.post(
"/api/v1/teams",
json={"team_name": ADMIN_DEMOGRAPHICS["username"], "team_password": "newteam"},
)
assert res.status_code == 409
assert res.json["message"] == "There is already a user with this name."
db = get_conn()
db.teams.insert({"team_name": "test teamname"})
res = client.post(
"/api/v1/teams", json={"team_name": "test teamname", "team_password": "newteam"}
)
assert res.status_code == 409
assert res.json["message"] == "There is already a team with this name."
res = client.post(
"/api/v1/teams", json={"team_name": "newteam", "team_password": "newteam"}
)
assert res.status_code == 201
assert res.json["success"] is True
new_tid = res.json["tid"]
user = db.users.find_one({"username": STUDENT_DEMOGRAPHICS["username"]})
old_team = db.teams.find_one({"team_name": STUDENT_DEMOGRAPHICS["username"]})
new_team = db.teams.find_one({"tid": new_tid})
assert user["tid"] == new_tid
assert old_team["size"] == 0
assert new_team["size"] == 1
res = client.post(
"/api/v1/teams", json={"team_name": "newteam2", "team_password": "newteam2"}
)
assert res.status_code == 422
assert (
res.json["message"] == "You can only create one new team per " + "user account!"
)
| true | true |
1c3990ef5c540d56a053816749da463b0e21a005 | 1,781 | py | Python | notebooks/relatorio.py | lbarosi/pylattes-lxml | 4de762f73a2086501ce06226ed04fb3c90fd9bd5 | [
"MIT"
] | null | null | null | notebooks/relatorio.py | lbarosi/pylattes-lxml | 4de762f73a2086501ce06226ed04fb3c90fd9bd5 | [
"MIT"
] | 1 | 2021-03-31T20:04:36.000Z | 2021-03-31T20:04:36.000Z | notebooks/relatorio.py | lbarosi/pylattes-lxml | 4de762f73a2086501ce06226ed04fb3c90fd9bd5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
import sys
import papermill as pm
from datetime import datetime as dt
import subprocess
import argparse
import warnings
warnings.filterwarnings("ignore")
def filename(nome):
    """Build a timestamped notebook file name for *nome*.

    NOTE(review): the timestamp format '%d_%m_%Y_%M%S' carries no hour
    field (%H) -- looks unintentional, confirm before changing.
    """
    stamp = dt.now().strftime('%d_%m_%Y_%M%S')
    return 'RelatorioLattes-' + nome + '_' + stamp + '.ipynb'
def run_notebook(nome):
    """Execute the report template notebook for *nome* and return the output name."""
    output_notebook = filename(nome)
    # Run the template with papermill, injecting `nome` as a notebook parameter.
    pm.execute_notebook(
        'RelatorioPessoalLattesUFCG.ipynb',
        output_notebook,
        parameters=dict(nome=nome),
    )
    return output_notebook
def generate_html_report(filename):
    """Convert the executed notebook *filename* into a standalone HTML report.

    Runs ``jupyter nbconvert`` without inputs/prompts and returns True on
    success, False otherwise.  The previous version ignored the subprocess
    exit status and reported success unconditionally.
    """
    result = subprocess.run(
        [
            "jupyter",
            "nbconvert",
            filename,
            "--no-input",
            "--no-prompt",
            "--to=html",
        ]
    )
    if result.returncode != 0:
        print("HTML report generation failed (exit code {})".format(result.returncode))
        return False
    print("HTML Report was generated")
    return True
#-----------------------------------------------------------------------
def main():
    """Parse the --nome argument, run the report notebook and convert it to HTML.

    Always returns True; conversion failures are reported on stdout rather
    than propagated (the previous bare ``except`` hid the actual error).
    """
    parser = argparse.ArgumentParser(description='Gera relatório individual de produção')
    parser.add_argument('--nome', required = True, help = 'Parâmetro nome para geração de relatório')
    args = parser.parse_args()
    nome = args.nome
    execTemplate = run_notebook(nome)
    try:
        generate_html_report(execTemplate)
    except Exception as exc:
        # Surface the real failure instead of swallowing it silently.
        print('Falha ao gerar o relatório HTML: {}'.format(exc))
    return True


if __name__ == "__main__":
    main()
#-----------------------------------------------------------------------
| 26.191176 | 101 | 0.580573 |
import os
import sys
import papermill as pm
from datetime import datetime as dt
import subprocess
import argparse
import warnings
warnings.filterwarnings("ignore")
def filename(nome):
inicio = dt.now().strftime('%d_%m_%Y_%M%S')
filename = 'RelatorioLattes-' + nome + '_' + inicio + '.ipynb'
return filename
def run_notebook(nome):
notebook_template = 'RelatorioPessoalLattesUFCG.ipynb'
nome_arquivo = filename(nome)
pm.execute_notebook(
notebook_template,
nome_arquivo,
parameters=dict(nome=nome),
)
return nome_arquivo
def generate_html_report(filename):
generate = subprocess.run(
[
"jupyter",
"nbconvert",
filename,
"--no-input",
"--no-prompt",
"--to=html",
]
)
print("HTML Report was generated")
return True
def main():
parser = argparse.ArgumentParser(description='Gera relatório individual de produção')
parser.add_argument('--nome', required = True, help = 'Parâmetro nome para geração de relatório')
args = parser.parse_args()
nome = args.nome
execTemplate = run_notebook(nome)
try:
generate_html_report(execTemplate)
except:
print('deu merda')
return True
if __name__ == "__main__":
main()
| true | true |
1c3991a4e576ccab340e3cc85f2ae2739e0b1392 | 370 | py | Python | template_project/settings.py | makspll/django-starter-template | f200d7c1626d74e0921cdfbced9040ee866ac2c1 | [
"MIT"
] | null | null | null | template_project/settings.py | makspll/django-starter-template | f200d7c1626d74e0921cdfbced9040ee866ac2c1 | [
"MIT"
] | 5 | 2021-03-30T14:21:46.000Z | 2021-09-22T19:41:59.000Z | template_project/settings.py | makspll/django-starter-template | f200d7c1626d74e0921cdfbced9040ee866ac2c1 | [
"MIT"
] | null | null | null | from split_settings.tools import optional, include
import os
ENV = os.environ.get("ENV_NAME",'dev')
BASE_SETTINGS = [
'components/common.py',
'components/languages.py',
'components/database.py',
'components/media.py',
'components/static.py',
'environments/{0}.py'.format(ENV),
optional('environments/local.py'),
]
include(*BASE_SETTINGS) | 21.764706 | 50 | 0.691892 | from split_settings.tools import optional, include
import os
ENV = os.environ.get("ENV_NAME",'dev')
BASE_SETTINGS = [
'components/common.py',
'components/languages.py',
'components/database.py',
'components/media.py',
'components/static.py',
'environments/{0}.py'.format(ENV),
optional('environments/local.py'),
]
include(*BASE_SETTINGS) | true | true |
1c3993eff7688207d56ccb8949d6ea2a04de9d4e | 908 | py | Python | setup.py | aabversteeg/simpleeval | 7999fd7d5c3928485b4633f5f93ab72931823fa5 | [
"MIT"
] | null | null | null | setup.py | aabversteeg/simpleeval | 7999fd7d5c3928485b4633f5f93ab72931823fa5 | [
"MIT"
] | null | null | null | setup.py | aabversteeg/simpleeval | 7999fd7d5c3928485b4633f5f93ab72931823fa5 | [
"MIT"
] | null | null | null | from setuptools import setup
__version__ = '0.9.5'
setup(
name='simpleeval',
py_modules=['simpleeval'],
version=__version__,
description='A simple, safe single expression evaluator library.',
long_description=open('README.rst', 'r').read(),
author='Daniel Fairhead',
author_email='danthedeckie@gmail.com',
url='https://github.com/danthedeckie/simpleeval',
download_url='https://github.com/danthedeckie/simpleeval/tarball/' + __version__,
keywords=['eval', 'simple', 'expression', 'parse', 'ast'],
test_suite='test_simpleeval',
use_2to3=True,
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
],
)
| 36.32 | 85 | 0.629956 | from setuptools import setup
__version__ = '0.9.5'
setup(
name='simpleeval',
py_modules=['simpleeval'],
version=__version__,
description='A simple, safe single expression evaluator library.',
long_description=open('README.rst', 'r').read(),
author='Daniel Fairhead',
author_email='danthedeckie@gmail.com',
url='https://github.com/danthedeckie/simpleeval',
download_url='https://github.com/danthedeckie/simpleeval/tarball/' + __version__,
keywords=['eval', 'simple', 'expression', 'parse', 'ast'],
test_suite='test_simpleeval',
use_2to3=True,
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
],
)
| true | true |
1c3994837f7bebf72235a21ddad66c5db7a1d511 | 877 | py | Python | setup.py | ExCiteS/geokey-airquality | 1c29351fa77a4ac1d834bbd1bb25b21ebb1dc57a | [
"MIT"
] | 1 | 2016-01-13T15:19:09.000Z | 2016-01-13T15:19:09.000Z | setup.py | ExCiteS/geokey-airquality | 1c29351fa77a4ac1d834bbd1bb25b21ebb1dc57a | [
"MIT"
] | 2 | 2015-12-08T14:33:07.000Z | 2018-09-20T10:01:07.000Z | setup.py | ExCiteS/geokey-airquality | 1c29351fa77a4ac1d834bbd1bb25b21ebb1dc57a | [
"MIT"
] | 1 | 2018-10-16T11:40:04.000Z | 2018-10-16T11:40:04.000Z | #!/usr/bin/env python
"""GeoKey extension for Air Quality functionality."""
from os.path import dirname, join
from setuptools import setup, find_packages
def read(file_name):
    """Return the contents of *file_name*, resolved relative to this file.

    The file is decoded as UTF-8 explicitly so the build does not depend on
    the platform's default locale encoding (matches the convention used by
    other setup scripts in this codebase).
    """
    with open(join(dirname(__file__), file_name), encoding='utf-8') as file_object:
        return file_object.read()
# Distribution name; the import package replaces the dash with an underscore.
name = 'geokey-airquality'
# Import geokey_airquality at build time to single-source __version__.
version = __import__(name.replace('-', '_')).__version__
repository = join('https://github.com/ExCiteS', name)

setup(
    name=name,
    version=version,
    description='GeoKey extension for Air Quality functionality',
    long_description=read('README.rst'),
    url=repository,
    # Tag tarball on GitHub matching the package version.
    download_url=join(repository, 'tarball', version),
    author='Mapping for Change',
    author_email='info@mappingforchange.org.uk',
    license='MIT',
    packages=find_packages(exclude=['*.tests', '*.tests.*', 'tests.*']),
    include_package_data=True,
    install_requires=[],
)
| 27.40625 | 72 | 0.705815 |
from os.path import dirname, join
from setuptools import setup, find_packages
def read(file_name):
with open(join(dirname(__file__), file_name)) as file_object:
return file_object.read()
name = 'geokey-airquality'
version = __import__(name.replace('-', '_')).__version__
repository = join('https://github.com/ExCiteS', name)
setup(
name=name,
version=version,
description='GeoKey extension for Air Quality functionality',
long_description=read('README.rst'),
url=repository,
download_url=join(repository, 'tarball', version),
author='Mapping for Change',
author_email='info@mappingforchange.org.uk',
license='MIT',
packages=find_packages(exclude=['*.tests', '*.tests.*', 'tests.*']),
include_package_data=True,
install_requires=[],
)
| true | true |
1c3995166b14f0ac8c3c1dec2f8d8366704ebf20 | 3,141 | py | Python | setup.py | c-l-nguyen/pantab | b3602815232928e59ee85bae3f35cd204dd4f06d | [
"BSD-3-Clause"
] | 1 | 2020-10-12T13:35:56.000Z | 2020-10-12T13:35:56.000Z | setup.py | mhadi813/pantab | f5267c23fb06b9a7f0b7b03c98d67148c0e3058d | [
"BSD-3-Clause"
] | null | null | null | setup.py | mhadi813/pantab | f5267c23fb06b9a7f0b7b03c98d67148c0e3058d | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
from setuptools import Extension, find_packages, setup
# Locate the Tableau Hyper API shared library; the lookup helper was renamed
# between tableauhyperapi releases, so fall back to the old symbol name.
try:
    from tableauhyperapi.impl.util import find_hyper_api_dll
except ImportError:  # renamed in version 0.0.10309
    from tableauhyperapi.impl.util import find_hyper_api_library as find_hyper_api_dll
here = os.path.abspath(os.path.dirname(__file__))
dll_path = find_hyper_api_dll()
# The PyPI long description comes straight from the README.
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
if sys.platform.startswith("win32"):
    # The Python distribution lacks the MSVC import library (.lib) needed to
    # link the C extensions, so extract it from the C++ release archive.
    import io
    import zipfile
    from urllib.request import urlopen
    # NOTE(review): plain-http download at build time, pinned to release
    # 6.0.0.10309 — keep in sync with the installed tableauhyperapi version.
    data = urlopen(
        "http://downloads.tableau.com/tssoftware/tableauhyperapi-cxx-windows-x86_64"
        "-release-hyperapi_release_6.0.0.10309.rf8b2e5f7.zip"
    )
    # Drop the .lib next to the DLL so the linker finds both in one place.
    target = dll_path.parent / "tableauhyperapi.lib"
    print(f"extract lib to {target}")
    with zipfile.ZipFile(io.BytesIO(data.read())) as archive:
        target.write_bytes(
            archive.open(
                "tableauhyperapi-cxx-windows-x86_64-release-hyperapi_release"
                "_6.0.0.10309.rf8b2e5f7/lib/tableauhyperapi.lib"
            ).read()
        )
# Treat warnings as errors on every platform; MSVC spells that /WX while
# gcc/clang use -Wextra -Werror.  (The previous unconditional
# extra_compile_args = ["-Wextra"] assignment was dead code: both branches
# below overwrote it.)
if os.name == "nt":
    extra_compile_args = ["/WX"]
else:
    extra_compile_args = ["-Wextra", "-Werror"]
def _hyper_extension(ext_name, module_source):
    """Return an Extension built from the shared pantab core plus *module_source*,
    linked against the Hyper API library located at dll_path."""
    return Extension(
        ext_name,
        sources=["pantab/pantab.c", module_source],
        library_dirs=[str(dll_path.parent.resolve())],
        libraries=[dll_path.stem.replace("lib", "")],
        depends=["pantab/pantab.h", "pantab/cffi.h"],
        extra_compile_args=extra_compile_args,
    )


writer_module = _hyper_extension("libwriter", "pantab/_writermodule.c")
reader_module = _hyper_extension("libreader", "pantab/_readermodule.c")
# Distribution metadata.  The compiled libwriter/libreader extensions defined
# above are what actually read/write Hyper extracts.
setup(
    name="pantab",
    version="1.1.0",
    description="Converts pandas DataFrames into Tableau Hyper Extracts",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/WillAyd/pantab",
    author="Will Ayd",
    author_email="william.ayd@icloud.com",
    license="BSD",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Office/Business",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    keywords="tableau visualization pandas dataframe",
    packages=find_packages(),
    package_data={"": ["*.h"], "pantab.tests": ["data/*"]},
    data_files=[("", ["LICENSE.txt", "README.md"])],
    python_requires=">=3.6",
    install_requires=["pandas", "tableauhyperapi"],
    extras_require={"dev": ["pytest"]},
    ext_modules=[writer_module, reader_module],
)
| 32.381443 | 86 | 0.672079 | import os
import sys
from setuptools import Extension, find_packages, setup
try:
from tableauhyperapi.impl.util import find_hyper_api_dll
except ImportError:
from tableauhyperapi.impl.util import find_hyper_api_library as find_hyper_api_dll
here = os.path.abspath(os.path.dirname(__file__))
dll_path = find_hyper_api_dll()
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
if sys.platform.startswith("win32"):
# so extract from C++ distributions
import io
import zipfile
from urllib.request import urlopen
data = urlopen(
"http://downloads.tableau.com/tssoftware/tableauhyperapi-cxx-windows-x86_64"
"-release-hyperapi_release_6.0.0.10309.rf8b2e5f7.zip"
)
target = dll_path.parent / "tableauhyperapi.lib"
print(f"extract lib to {target}")
with zipfile.ZipFile(io.BytesIO(data.read())) as archive:
target.write_bytes(
archive.open(
"tableauhyperapi-cxx-windows-x86_64-release-hyperapi_release"
"_6.0.0.10309.rf8b2e5f7/lib/tableauhyperapi.lib"
).read()
)
extra_compile_args = ["-Wextra"]
# MSVC compiler has different flags; assume that's what we are using on Windows
if os.name == "nt":
extra_compile_args = ["/WX"]
else:
extra_compile_args = ["-Wextra", "-Werror"]
writer_module = Extension(
"libwriter",
sources=["pantab/pantab.c", "pantab/_writermodule.c"],
library_dirs=[str(dll_path.parent.resolve())],
libraries=[dll_path.stem.replace("lib", "")],
depends=["pantab/pantab.h", "pantab/cffi.h"],
extra_compile_args=extra_compile_args,
)
reader_module = Extension(
"libreader",
sources=["pantab/pantab.c", "pantab/_readermodule.c"],
library_dirs=[str(dll_path.parent.resolve())],
libraries=[dll_path.stem.replace("lib", "")],
depends=["pantab/pantab.h", "pantab/cffi.h"],
extra_compile_args=extra_compile_args,
)
setup(
name="pantab",
version="1.1.0",
description="Converts pandas DataFrames into Tableau Hyper Extracts",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/WillAyd/pantab",
author="Will Ayd",
author_email="william.ayd@icloud.com",
license="BSD",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Office/Business",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="tableau visualization pandas dataframe",
packages=find_packages(),
package_data={"": ["*.h"], "pantab.tests": ["data/*"]},
data_files=[("", ["LICENSE.txt", "README.md"])],
python_requires=">=3.6",
install_requires=["pandas", "tableauhyperapi"],
extras_require={"dev": ["pytest"]},
ext_modules=[writer_module, reader_module],
)
| true | true |
1c3998313b8fbf3777d549c3a2945f1360c694e7 | 1,161 | py | Python | var/spack/repos/builtin/packages/libxfont2/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/libxfont2/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/libxfont2/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libxfont2(AutotoolsPackage, XorgPackage):
    """libXfont provides the core of the legacy X11 font system, handling the
    index files (fonts.dir, fonts.alias, fonts.scale), the various font file
    formats, and rasterizing them. It is used by the X servers, the
    X Font Server (xfs), and some font utilities (bdftopcf for instance),
    but should not be used by normal X11 clients. X11 clients access fonts
    via either the new API's in libXft, or the legacy API's in libX11."""
    homepage = "https://cgit.freedesktop.org/xorg/lib/libXfont"
    # Tarball path relative to the X.org mirror root (XorgPackage convention).
    xorg_mirror_path = "lib/libXfont2-2.0.1.tar.gz"
    version('2.0.1', sha256='381b6b385a69343df48a082523c856aed9042fbbc8ee0a6342fb502e4321230a')
    # Link/run dependencies.
    depends_on('libfontenc')
    depends_on('freetype')
    depends_on('xtrans')
    depends_on('xproto')
    depends_on('fontsproto@2.1.3:')
    # Build-only tooling.
    depends_on('pkgconfig', type='build')
    depends_on('util-macros', type='build')
| 38.7 | 95 | 0.727821 |
from spack import *
class Libxfont2(AutotoolsPackage, XorgPackage):
homepage = "https://cgit.freedesktop.org/xorg/lib/libXfont"
xorg_mirror_path = "lib/libXfont2-2.0.1.tar.gz"
version('2.0.1', sha256='381b6b385a69343df48a082523c856aed9042fbbc8ee0a6342fb502e4321230a')
depends_on('libfontenc')
depends_on('freetype')
depends_on('xtrans')
depends_on('xproto')
depends_on('fontsproto@2.1.3:')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| true | true |
1c3998d4f906c41b8a647bc9aaa38e6d1ca78881 | 1,883 | py | Python | integration-tests/generate_data_coassembly_command.py | luispedro/SemiBin | 7a5c9c68bb29ec27b64d7b34ed88a2eab921314b | [
"MIT"
] | 25 | 2021-05-19T15:38:30.000Z | 2022-03-18T09:28:32.000Z | integration-tests/generate_data_coassembly_command.py | luispedro/SemiBin | 7a5c9c68bb29ec27b64d7b34ed88a2eab921314b | [
"MIT"
] | 39 | 2021-05-12T05:22:26.000Z | 2022-03-31T13:28:46.000Z | integration-tests/generate_data_coassembly_command.py | luispedro/SemiBin | 7a5c9c68bb29ec27b64d7b34ed88a2eab921314b | [
"MIT"
] | 5 | 2021-03-15T23:08:00.000Z | 2021-05-07T07:31:03.000Z | import os
import pandas as pd
import subprocess
# The same sample FASTA is shipped in four compressions (plain, gz, bz2, xz);
# SemiBin must accept every flavour and produce identical feature tables:
#   data.csv       -> 40 contigs x 141 features
#   data_split.csv -> 80 rows after the must-link split
# (Previously this was one copy-pasted block per input format.)
_CASES = [
    ('input.fasta', 'output_coassembly_fa'),
    ('input.fasta.gz', 'output_coassembly_gz'),
    ('input.fasta.bz2', 'output_coassembly_bz2'),
    ('input.fasta.xz', 'output_coassembly_xz'),
]

for input_name, output_dir in _CASES:
    subprocess.check_call(
        'SemiBin generate_data_single -i test/coassembly_sample_data/{0}'
        ' -o {1} -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1'
        ' -b test/coassembly_sample_data/input.sorted*.bam'.format(input_name, output_dir),
        shell=True)
    data = pd.read_csv('{0}/data.csv'.format(output_dir), index_col=0)
    data_split = pd.read_csv('{0}/data_split.csv'.format(output_dir), index_col=0)
    assert data.shape == (40, 141)
    assert data_split.shape == (80, 141)
| 44.833333 | 232 | 0.774827 | import os
import pandas as pd
import subprocess
l('SemiBin generate_data_single -i test/coassembly_sample_data/input.fasta -o output_coassembly_fa -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1 -b test/coassembly_sample_data/input.sorted*.bam', shell=True)
data = pd.read_csv('output_coassembly_fa/data.csv', index_col=0)
data_split = pd.read_csv('output_coassembly_fa/data_split.csv', index_col=0)
assert data.shape == (40, 141)
assert data_split.shape == (80, 141)
'SemiBin generate_data_single -i test/coassembly_sample_data/input.fasta.gz -o output_coassembly_gz -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1 -b test/coassembly_sample_data/input.sorted*.bam', shell=True)
data = pd.read_csv('output_coassembly_gz/data.csv', index_col=0)
data_split = pd.read_csv('output_coassembly_gz/data_split.csv', index_col=0)
assert data.shape == (40, 141)
assert data_split.shape == (80, 141)
emiBin generate_data_single -i test/coassembly_sample_data/input.fasta.bz2 -o output_coassembly_bz2 -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1 -b test/coassembly_sample_data/input.sorted*.bam', shell=True)
data = pd.read_csv('output_coassembly_bz2/data.csv', index_col=0)
data_split = pd.read_csv('output_coassembly_bz2/data_split.csv', index_col=0)
assert data.shape == (40, 141)
assert data_split.shape == (80, 141)
'SemiBin generate_data_single -i test/coassembly_sample_data/input.fasta.xz -o output_coassembly_xz -m 2500 --ratio 0.05 --ml-threshold 4000 -p 1 -b test/coassembly_sample_data/input.sorted*.bam', shell=True)
data = pd.read_csv('output_coassembly_xz/data.csv', index_col=0)
data_split = pd.read_csv('output_coassembly_xz/data_split.csv', index_col=0)
assert data.shape == (40, 141)
assert data_split.shape == (80, 141)
| true | true |
1c3998dcd1f7fb374c19e7b1d43b9ea62225f6df | 711 | py | Python | leetcode/palindrome_number_without_string.py | abhik-93/python | db8dd0b8731f3687a5d0c7bf32b7ffe740569104 | [
"MIT"
] | null | null | null | leetcode/palindrome_number_without_string.py | abhik-93/python | db8dd0b8731f3687a5d0c7bf32b7ffe740569104 | [
"MIT"
] | null | null | null | leetcode/palindrome_number_without_string.py | abhik-93/python | db8dd0b8731f3687a5d0c7bf32b7ffe740569104 | [
"MIT"
] | null | null | null | class Solution(object):
def convertInttoList(self, n):
l = []
while n != 0:
l = [n % 10] + l
n = n // 10
return l
def isPalindrome(self, x):
if x <= 2**31 -1 and x>= -2**31:
if x>0:
x = Solution().convertInttoList(x)
return x == x[::-1]
elif x<0:
return False # Since negative integer will never be a palindrome
elif x == 0:
return True
# Smoke test; expected output: True, False, False, False, True
print(Solution().isPalindrome(121))
print(Solution().isPalindrome(-121))
print(Solution().isPalindrome(10))
print(Solution().isPalindrome(-101))
print(Solution().isPalindrome(0))
#Runtime: 76 ms in Leetcode | 27.346154 | 80 | 0.524613 | class Solution(object):
def convertInttoList(self, n):
l = []
while n != 0:
l = [n % 10] + l
n = n // 10
return l
def isPalindrome(self, x):
if x <= 2**31 -1 and x>= -2**31:
if x>0:
x = Solution().convertInttoList(x)
return x == x[::-1]
elif x<0:
return False
elif x == 0:
return True
print(Solution().isPalindrome(121))
print(Solution().isPalindrome(-121))
print(Solution().isPalindrome(10))
print(Solution().isPalindrome(-101))
print(Solution().isPalindrome(0))
| true | true |
1c3998e6be14a17b2323d453ae57a027cf8058e9 | 1,494 | py | Python | classification/prepare_data.py | HuadingLing/astnn | 64fb02666245387d7aee80364ea4d421feb1594e | [
"MIT"
] | null | null | null | classification/prepare_data.py | HuadingLing/astnn | 64fb02666245387d7aee80364ea4d421feb1594e | [
"MIT"
] | null | null | null | classification/prepare_data.py | HuadingLing/astnn | 64fb02666245387d7aee80364ea4d421feb1594e | [
"MIT"
] | null | null | null | from pycparser import c_parser, c_ast
import pandas as pd
import os
import re
import sys
from gensim.models.word2vec import Word2Vec
import pickle
from tree import ASTNode, SingleNode
import numpy as np
def get_sequences(node, sequence):
    """Append the pre-order token stream of the AST rooted at *node* to *sequence*.

    After all children of a Compound (block) node have been emitted, a trailing
    'End' sentinel is appended so the flat sequence preserves nesting.
    """
    current = SingleNode(node)
    sequence.append(current.get_token())
    for _, child in node.children():
        get_sequences(child, sequence)
    if current.get_token().lower() == 'compound':
        sequence.append('End')
def get_blocks(node, block_seq):
    """Split the AST rooted at *node* into statement blocks, appending ASTNode
    wrappers to *block_seq*.

    Control-flow nodes (FuncDef, If, For, While, DoWhile) start a new block;
    a Compound body is bracketed by a trailing 'End' sentinel node.
    """
    children = node.children()
    name = node.__class__.__name__
    if name in ['FuncDef', 'If', 'For', 'While', 'DoWhile']:
        block_seq.append(ASTNode(node))
        # For a 'For' node all but the last child belong to the loop header,
        # so only the last child (the body) is traversed separately.
        if name != 'For':
            skip = 1
        else:
            skip = len(children) - 1

        for i in range(skip, len(children)):
            child = children[i][1]
            if child.__class__.__name__ not in ['FuncDef', 'If', 'For', 'While', 'DoWhile', 'Compound']:
                block_seq.append(ASTNode(child))
            get_blocks(child, block_seq)
    # Fix: the original used `name is 'Compound'`, an identity comparison with
    # a string literal that only works by CPython interning accident (and is a
    # SyntaxWarning on Python 3.8+).  Equality is the correct test.
    elif name == 'Compound':
        block_seq.append(ASTNode(name))
        for _, child in node.children():
            if child.__class__.__name__ not in ['If', 'For', 'While', 'DoWhile']:
                block_seq.append(ASTNode(child))
            get_blocks(child, block_seq)
        block_seq.append(ASTNode('End'))
    else:
        for _, child in node.children():
            get_blocks(child, block_seq)
| 18.675 | 104 | 0.603079 | from pycparser import c_parser, c_ast
import pandas as pd
import os
import re
import sys
from gensim.models.word2vec import Word2Vec
import pickle
from tree import ASTNode, SingleNode
import numpy as np
def get_sequences(node, sequence):
current = SingleNode(node)
sequence.append(current.get_token())
for _, child in node.children():
get_sequences(child, sequence)
if current.get_token().lower() == 'compound':
sequence.append('End')
def get_blocks(node, block_seq):
children = node.children()
name = node.__class__.__name__
if name in ['FuncDef', 'If', 'For', 'While', 'DoWhile']:
block_seq.append(ASTNode(node))
if name != 'For':
skip = 1
else:
skip = len(children) - 1
for i in range(skip, len(children)):
child = children[i][1]
if child.__class__.__name__ not in ['FuncDef', 'If', 'For', 'While', 'DoWhile', 'Compound']:
block_seq.append(ASTNode(child))
get_blocks(child, block_seq)
elif name is 'Compound':
block_seq.append(ASTNode(name))
for _, child in node.children():
if child.__class__.__name__ not in ['If', 'For', 'While', 'DoWhile']:
block_seq.append(ASTNode(child))
get_blocks(child, block_seq)
block_seq.append(ASTNode('End'))
else:
for _, child in node.children():
get_blocks(child, block_seq)
| true | true |
1c399a2d888171b25618f23f1c90aecc6fe52b0c | 308 | py | Python | agape/organizations/urls.py | codewiseio/django-agape | a63fc2cc63776c2b2cb16c7f6aee64f5783c3bba | [
"MIT"
] | null | null | null | agape/organizations/urls.py | codewiseio/django-agape | a63fc2cc63776c2b2cb16c7f6aee64f5783c3bba | [
"MIT"
] | null | null | null | agape/organizations/urls.py | codewiseio/django-agape | a63fc2cc63776c2b2cb16c7f6aee64f5783c3bba | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from .views import OrganizationViewSet
# build router
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'organizations', OrganizationViewSet, base_name='organizations')
urlpatterns = [
url(r'^',include(router.urls)),
] | 25.666667 | 81 | 0.788961 | from django.conf.urls import url, include
from .views import OrganizationViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'organizations', OrganizationViewSet, base_name='organizations')
urlpatterns = [
url(r'^',include(router.urls)),
] | true | true |
1c399c304ad15e29c7a808c06d887482964fccc3 | 712 | py | Python | easy_time_tracker/constants.py | btr1975/easy-time-tracker | 3889913844b41a6da55a8b1aa1721a9873349bf6 | [
"MIT"
] | null | null | null | easy_time_tracker/constants.py | btr1975/easy-time-tracker | 3889913844b41a6da55a8b1aa1721a9873349bf6 | [
"MIT"
] | null | null | null | easy_time_tracker/constants.py | btr1975/easy-time-tracker | 3889913844b41a6da55a8b1aa1721a9873349bf6 | [
"MIT"
] | null | null | null | """
Constants for easy_time_tracker
"""
import os
from pathlib import Path
# The absolute path to the easy-time-tracker directory
EASY_TIME_TRACKER_BASE_PATH = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0]
# The name to use for the current record
EASY_TIME_TRACKER_CURRENT_RECORD = os.getenv('EASY_TIME_TRACKER_CURRENT_RECORD') or \
os.path.join(Path.home(), 'easy-time-tracker-data', 'current_record.json')
# The name to use for the completed records
EASY_TIME_TRACKER_COMPLETED_RECORDS = os.getenv('EASY_TIME_TRACKER_COMPLETED_RECORDS') or \
os.path.join(Path.home(), 'easy-time-tracker-data', 'completed_records.json')
| 47.466667 | 115 | 0.710674 | import os
from pathlib import Path
EASY_TIME_TRACKER_BASE_PATH = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0]
EASY_TIME_TRACKER_CURRENT_RECORD = os.getenv('EASY_TIME_TRACKER_CURRENT_RECORD') or \
os.path.join(Path.home(), 'easy-time-tracker-data', 'current_record.json')
EASY_TIME_TRACKER_COMPLETED_RECORDS = os.getenv('EASY_TIME_TRACKER_COMPLETED_RECORDS') or \
os.path.join(Path.home(), 'easy-time-tracker-data', 'completed_records.json')
| true | true |
1c399d3ece99d61b605cd8d1d1e3f9156903cc35 | 6,130 | py | Python | src/generative/modify_attribute.py | rajivmanivannan/facenet | 4a896201dba3f8caf64ba4d5004d60eaf9aefd78 | [
"MIT"
] | null | null | null | src/generative/modify_attribute.py | rajivmanivannan/facenet | 4a896201dba3f8caf64ba4d5004d60eaf9aefd78 | [
"MIT"
] | null | null | null | src/generative/modify_attribute.py | rajivmanivannan/facenet | 4a896201dba3f8caf64ba4d5004d60eaf9aefd78 | [
"MIT"
] | 1 | 2020-03-03T05:44:19.000Z | 2020-03-03T05:44:19.000Z | # MIT License
#
# Copyright (c) 2017 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Modify attributes of images using attribute vectors calculated using
'calculate_attribute_vectors.py'. Images are generated from latent variables of
the CelebA datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sys
import argparse
import importlib
import facenet
import os
import numpy as np
import h5py
import math
from scipy import misc
def main(args):
    """Reconstruct faces while progressively adding one attribute vector.

    Loads a pre-trained VAE checkpoint, reads latent variables and attribute
    vectors from the HDF5 file produced by calculate_attribute_vectors.py,
    sweeps the 'Smiling' attribute over a fixed set of images, and writes the
    resulting interpolation grid to ``args.output_image_filename``.
    """
    # Per-channel RGB mean/stddev used to (un)normalize images.
    # NOTE(review): presumably statistics of the training set — confirm.
    img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
    img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))
    # The VAE architecture is loaded dynamically from the module named on the CLI.
    vae_def = importlib.import_module(args.vae_def)
    vae = vae_def.Vae(args.latent_var_size)
    gen_image_size = vae.get_image_size()
    with tf.Graph().as_default():
        tf.set_random_seed(args.seed)
        images = tf.placeholder(tf.float32, shape=(None,gen_image_size,gen_image_size,3), name='input')
        # Normalize
        images_norm = (images-img_mean) / img_stddev
        # Resize to appropriate size for the encoder
        images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size,gen_image_size))
        # Create encoder network
        mean, log_variance = vae.encoder(images_norm_resize, True)
        # Reparameterization trick: latent = mean + eps * std.
        epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
        std = tf.exp(log_variance/2)
        latent_var = mean + epsilon * std
        # Create decoder
        reconstructed_norm = vae.decoder(latent_var, False)
        # Un-normalize
        reconstructed = (reconstructed_norm*img_stddev) + img_mean
        # Create a saver
        saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
        # Start running operations on the Graph
        gpu_memory_fraction = 1.0
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        tf.train.start_queue_runners(coord=coord, sess=sess)
        with sess.as_default():
            vae_checkpoint = os.path.expanduser(args.vae_checkpoint)
            print('Restoring VAE checkpoint: %s' % vae_checkpoint)
            saver.restore(sess, vae_checkpoint)
            filename = os.path.expanduser(args.attributes_filename)
            with h5py.File(filename,'r') as f:
                latent_vars = np.array(f.get('latent_vars'))
                attributes = np.array(f.get('attributes'))
                attribute_vectors = np.array(f.get('attribute_vectors'))
            # Reconstruct faces while adding varying amount of the selected attribute vector
            attribute_index = 31 # 31: 'Smiling'
            image_indices = [8,11,13,18,19,26,31,39,47,54,56,57,58,59,60,73]
            nrof_images = len(image_indices)
            nrof_interp_steps = 10
            sweep_latent_var = np.zeros((nrof_interp_steps*nrof_images, args.latent_var_size), np.float32)
            for j in range(nrof_images):
                image_index = image_indices[j]
                # Pick the image_index-th face that does NOT have the attribute
                # (attribute label -1).
                idx = np.argwhere(attributes[:,attribute_index]==-1)[image_index,0]
                for i in range(nrof_interp_steps):
                    # Add the attribute vector in increasing amounts
                    # (0 .. 4.5 times its length across the 10 steps).
                    sweep_latent_var[i+nrof_interp_steps*j,:] = latent_vars[idx,:] + 5.0*i/nrof_interp_steps*attribute_vectors[attribute_index,:]
            recon = sess.run(reconstructed, feed_dict={latent_var:sweep_latent_var})
            img = facenet.put_images_on_grid(recon, shape=(nrof_interp_steps*2,int(math.ceil(nrof_images/2))))
            image_filename = os.path.expanduser(args.output_image_filename)
            print('Writing generated image to %s' % image_filename)
            misc.imsave(image_filename, img)
def parse_arguments(argv):
    """Build the CLI parser and parse *argv* (the arguments after the program name)."""
    parser = argparse.ArgumentParser()
    # Required positional arguments, in command-line order.
    positionals = [
        ('vae_def',
         'Model definition for the variational autoencoder. Points to a module containing the definition.'),
        ('vae_checkpoint',
         'Checkpoint file of a pre-trained variational autoencoder.'),
        ('attributes_filename',
         'The file containing the attribute vectors, as generated by calculate_attribute_vectors.py.'),
        ('output_image_filename',
         'File to write the generated image to.'),
    ]
    for arg_name, arg_help in positionals:
        parser.add_argument(arg_name, type=str, help=arg_help)
    # Optional tuning knobs.
    parser.add_argument('--latent_var_size', type=int, default=100,
                        help='Dimensionality of the latent variable.')
    parser.add_argument('--seed', type=int, default=666,
                        help='Random seed.')
    return parser.parse_args(argv)
# Entry point: parse CLI args (excluding the program name) and run.
if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
| 42.867133 | 145 | 0.684502 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sys
import argparse
import importlib
import facenet
import os
import numpy as np
import h5py
import math
from scipy import misc
def main(args):
img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))
vae_def = importlib.import_module(args.vae_def)
vae = vae_def.Vae(args.latent_var_size)
gen_image_size = vae.get_image_size()
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
images = tf.placeholder(tf.float32, shape=(None,gen_image_size,gen_image_size,3), name='input')
images_norm = (images-img_mean) / img_stddev
images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size,gen_image_size))
mean, log_variance = vae.encoder(images_norm_resize, True)
epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
std = tf.exp(log_variance/2)
latent_var = mean + epsilon * std
reconstructed_norm = vae.decoder(latent_var, False)
reconstructed = (reconstructed_norm*img_stddev) + img_mean
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
gpu_memory_fraction = 1.0
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
with sess.as_default():
vae_checkpoint = os.path.expanduser(args.vae_checkpoint)
print('Restoring VAE checkpoint: %s' % vae_checkpoint)
saver.restore(sess, vae_checkpoint)
filename = os.path.expanduser(args.attributes_filename)
with h5py.File(filename,'r') as f:
latent_vars = np.array(f.get('latent_vars'))
attributes = np.array(f.get('attributes'))
attribute_vectors = np.array(f.get('attribute_vectors'))
attribute_index = 31
image_indices = [8,11,13,18,19,26,31,39,47,54,56,57,58,59,60,73]
nrof_images = len(image_indices)
nrof_interp_steps = 10
sweep_latent_var = np.zeros((nrof_interp_steps*nrof_images, args.latent_var_size), np.float32)
for j in range(nrof_images):
image_index = image_indices[j]
idx = np.argwhere(attributes[:,attribute_index]==-1)[image_index,0]
for i in range(nrof_interp_steps):
sweep_latent_var[i+nrof_interp_steps*j,:] = latent_vars[idx,:] + 5.0*i/nrof_interp_steps*attribute_vectors[attribute_index,:]
recon = sess.run(reconstructed, feed_dict={latent_var:sweep_latent_var})
img = facenet.put_images_on_grid(recon, shape=(nrof_interp_steps*2,int(math.ceil(nrof_images/2))))
image_filename = os.path.expanduser(args.output_image_filename)
print('Writing generated image to %s' % image_filename)
misc.imsave(image_filename, img)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('vae_def', type=str,
help='Model definition for the variational autoencoder. Points to a module containing the definition.')
parser.add_argument('vae_checkpoint', type=str,
help='Checkpoint file of a pre-trained variational autoencoder.')
parser.add_argument('attributes_filename', type=str,
help='The file containing the attribute vectors, as generated by calculate_attribute_vectors.py.')
parser.add_argument('output_image_filename', type=str,
help='File to write the generated image to.')
parser.add_argument('--latent_var_size', type=int,
help='Dimensionality of the latent variable.', default=100)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| true | true |
1c399daeface1f28ecc2913fe550bc3ecf8af487 | 3,159 | py | Python | tests/bugs/core_1550_postfix_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_1550_postfix_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_1550_postfix_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_1550_postfix
# title: Unnecessary index scan happens when the same index is mapped to both WHERE and ORDER BY clauses
# decription:
# http://sourceforge.net/p/firebird/code/60368
# Date: 2014-12-16 11:40:42 +0000 (Tue, 16 Dec 2014)
#
# First letter to dimitr: 30.09.2014 20:01.
# Reproduced on 3.0.0.31472 Beta 2 (10.dec.2014).
# Checked on:
# 3.0.3.32837: OK, 1.516s.
# 3.0.3.32838: OK, 0.953s.
# 4.0.0.800: OK, 1.625s.
# 4.0.0.801: OK, 1.125s.
#
# tracker_id:
# min_versions: ['3.0']
# versions: 3.0
# qmid:
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
# Plans are compared verbatim, so no output substitutions are needed.
substitutions_1 = []
# The database starts empty; all test objects are created by test_script_1.
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
-- sent to dimitr 30.09.14 at 22:09
set term ^;
execute block as
begin
execute statement 'drop sequence g';
when any do begin end
end^
set term ;^
commit;
create sequence g; commit;
recreate table td(id int primary key using index td_pk, f01 int, f02 int); commit;
recreate table tm(id int); commit;
insert into tm select gen_id(g,1) from rdb$types rows 100;
commit;
insert into td(id, f01, f02) select id, (select min(id) from tm), gen_id(g,1) from tm; commit;
create index td_f01_non_unq on td(f01);
create unique index td_f01_f02_unq on td(f01, f02); -- ### NB: compound UNIQUE index presens here beside of PK ###
commit;
set planonly;
-- 1. Check for usage when only PK fields are involved:
select *
from tm m
where exists(
select * from td d where m.id = d.id
order by d.id --------------------------- ### this "useless" order by should prevent from bitmap creation in 3.0+
);
-- Ineffective plan was here:
-- PLAN (D ORDER TD_PK INDEX (TD_PK))
-- ... ^
-- |
-- +-----> BITMAP created!
-- 2. Check for usage when fields from UNIQUE index are involved:
select *
from tm m
where exists(
select * from td d
where m.id = d.f01 and d.f02 = 10
order by d.f01, d.f02 ------------------- ### this "useless" order by should prevent from bitmap creation in 3.0+
);
-- Ineffective plan was here:
-- PLAN (D ORDER TD_F01_F02_UNQ INDEX (TD_F01_F02_UNQ))
-- ... ^
-- |
-- +-----> BITMAP created!
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN (D ORDER TD_PK)
PLAN (M NATURAL)
PLAN (D ORDER TD_F01_F02_UNQ)
PLAN (M NATURAL)
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
    """Run the ISQL plan-only script and compare cleaned output to the expected plans."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
| 30.085714 | 121 | 0.550174 |
import pytest
from firebird.qa import db_factory, isql_act, Action
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
-- sent to dimitr 30.09.14 at 22:09
set term ^;
execute block as
begin
execute statement 'drop sequence g';
when any do begin end
end^
set term ;^
commit;
create sequence g; commit;
recreate table td(id int primary key using index td_pk, f01 int, f02 int); commit;
recreate table tm(id int); commit;
insert into tm select gen_id(g,1) from rdb$types rows 100;
commit;
insert into td(id, f01, f02) select id, (select min(id) from tm), gen_id(g,1) from tm; commit;
create index td_f01_non_unq on td(f01);
create unique index td_f01_f02_unq on td(f01, f02); -- ### NB: compound UNIQUE index presens here beside of PK ###
commit;
set planonly;
-- 1. Check for usage when only PK fields are involved:
select *
from tm m
where exists(
select * from td d where m.id = d.id
order by d.id --------------------------- ### this "useless" order by should prevent from bitmap creation in 3.0+
);
-- Ineffective plan was here:
-- PLAN (D ORDER TD_PK INDEX (TD_PK))
-- ... ^
-- |
-- +-----> BITMAP created!
-- 2. Check for usage when fields from UNIQUE index are involved:
select *
from tm m
where exists(
select * from td d
where m.id = d.f01 and d.f02 = 10
order by d.f01, d.f02 ------------------- ### this "useless" order by should prevent from bitmap creation in 3.0+
);
-- Ineffective plan was here:
-- PLAN (D ORDER TD_F01_F02_UNQ INDEX (TD_F01_F02_UNQ))
-- ... ^
-- |
-- +-----> BITMAP created!
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN (D ORDER TD_PK)
PLAN (M NATURAL)
PLAN (D ORDER TD_F01_F02_UNQ)
PLAN (M NATURAL)
"""
@pytest.mark.version('>=3.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| true | true |
1c399e484c6ee4793096c73639f63547f98ded20 | 774 | py | Python | accounts/permissions.py | mnpenchev/DocManSys_django_rest | 6b6a9be55d7bb1d44d887649aae69efbd000444d | [
"MIT"
] | null | null | null | accounts/permissions.py | mnpenchev/DocManSys_django_rest | 6b6a9be55d7bb1d44d887649aae69efbd000444d | [
"MIT"
] | null | null | null | accounts/permissions.py | mnpenchev/DocManSys_django_rest | 6b6a9be55d7bb1d44d887649aae69efbd000444d | [
"MIT"
] | null | null | null | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Object-level permission: users may only modify their own profile."""

    def has_object_permission(self, request, view, obj):
        """Allow read-only methods for anyone; writes only for the profile owner."""
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.id == request.user.id
# class UpdateOwnStatus(permissions.BasePermission):
# """ Allow users to update their own documents """
#
# def has_object_permissions(self, request, view, obj):
# """ check the user is trying to update their own status """
# if request.method in permissions.SAFE_METHODS:
# return True
# return obj.user_profile.id == request.user.id
| 32.25 | 69 | 0.674419 | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.id == request.user.id
| true | true |
1c399f3f1011ac9707c0ce8570cb1153117e1986 | 2,083 | py | Python | scripts/data_convert/filter_queries.py | prateeksingh0001/FlexNeuART | ebc82ca4fe01436374c595db2429bc49fb9e1dd0 | [
"Apache-2.0"
] | null | null | null | scripts/data_convert/filter_queries.py | prateeksingh0001/FlexNeuART | ebc82ca4fe01436374c595db2429bc49fb9e1dd0 | [
"Apache-2.0"
] | null | null | null | scripts/data_convert/filter_queries.py | prateeksingh0001/FlexNeuART | ebc82ca4fe01436374c595db2429bc49fb9e1dd0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Filtering queries to exclude queries that might textually match queries from a set of sub-directories
import sys
import os
import json
import argparse
sys.path.append('.')
from scripts.data_convert.convert_common import FileWrapper, read_queries
from scripts.config import TEXT_FIELD_NAME, QUESTION_FILE_JSON
parser = argparse.ArgumentParser(description='Filter queries to exclude queries from given sub-directories')
parser.add_argument('--input_dir', metavar='input dir', help='input dir',
                    type=str, required=True)
parser.add_argument('--filter_query_dir', metavar='filtering query dir',
                    default=[],
                    help=f'all queries found in {QUESTION_FILE_JSON} files from these directories are ignored',
                    nargs='*')
parser.add_argument('--out_dir', metavar='output directory', help='output directory',
                    type=str, required=True)

args = parser.parse_args()
print(args)
arg_vars = vars(args)

# Collect the text of every query that must be excluded from the output.
ignore_queries = set()

for qfile_dir in args.filter_query_dir:
    qfile_name = os.path.join(qfile_dir, QUESTION_FILE_JSON)
    for e in read_queries(qfile_name):
        # Entries without a text field cannot be matched, so they are skipped.
        if TEXT_FIELD_NAME not in e:
            continue
        ignore_queries.add(e[TEXT_FIELD_NAME])
    print('Read queries from: ' + qfile_name)

print('A list of queries to ignore has %d entries' % (len(ignore_queries)))

# exist_ok avoids a race if the directory appears between a check and creation.
os.makedirs(args.out_dir, exist_ok=True)

out_file_queries = FileWrapper(os.path.join(args.out_dir, QUESTION_FILE_JSON), 'w')

read_qty = 0
wrote_qty = 0
for e in read_queries(os.path.join(args.input_dir, QUESTION_FILE_JSON)):
    read_qty += 1
    if TEXT_FIELD_NAME not in e:
        continue
    text = e[TEXT_FIELD_NAME]
    if text in ignore_queries:
        # Fixed message: the original had an unmatched trailing quote.
        print(f"Ignoring query, which is found in specified query files: '{text}'")
        continue
    wrote_qty += 1
    out_file_queries.write(json.dumps(e) + '\n')

ignored_qty = read_qty - wrote_qty
print(f'Wrote {wrote_qty} queries, ignored {ignored_qty} queries')
out_file_queries.close()
| 31.560606 | 111 | 0.707633 |
import sys
import os
import json
import argparse
sys.path.append('.')
from scripts.data_convert.convert_common import FileWrapper, read_queries
from scripts.config import TEXT_FIELD_NAME, QUESTION_FILE_JSON
parser = argparse.ArgumentParser(description='Filter queries to exclude queries from given sub-directories')
parser.add_argument('--input_dir', metavar='input dir', help='input dir',
type=str, required=True)
parser.add_argument('--filter_query_dir', metavar='filtering query dir',
default=[],
help=f'all queries found in {QUESTION_FILE_JSON} files from these directories are ignored',
nargs='*')
parser.add_argument('--out_dir', metavar='output directory', help='output directory',
type=str, required=True)
args = parser.parse_args()
print(args)
arg_vars = vars(args)
ignore_queries = set()
for qfile_dir in args.filter_query_dir:
qfile_name = os.path.join(qfile_dir, QUESTION_FILE_JSON)
for e in read_queries(qfile_name):
if not TEXT_FIELD_NAME in e:
continue
ignore_queries.add(e[TEXT_FIELD_NAME])
print('Read queries from: ' + qfile_name)
print('A list of queries to ignore has %d entries' % (len(ignore_queries)))
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
out_file_queries = FileWrapper(os.path.join(args.out_dir, QUESTION_FILE_JSON), 'w')
read_qty = 0
wrote_qty = 0
for e in read_queries(os.path.join(args.input_dir, QUESTION_FILE_JSON)):
read_qty += 1
if not TEXT_FIELD_NAME in e:
continue
text = e[TEXT_FIELD_NAME]
if text in ignore_queries:
print(f"Ignoring query, which is found in specified query files: {text}'")
continue
wrote_qty += 1
out_file_queries.write(json.dumps(e) + '\n')
ignored_qty = read_qty - wrote_qty
print(f'Wrote {wrote_qty} queries, ignored {ignored_qty} queries')
out_file_queries.close()
| true | true |
1c399fadb353613479b7f9eb7478ded5c5946060 | 3,460 | py | Python | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/server.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/server.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/server.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .tracked_resource import TrackedResource
class Server(TrackedResource):
"""An Azure SQL Database server.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param identity: The Azure Active Directory identity of the server.
:type identity: ~azure.mgmt.sql.models.ResourceIdentity
:ivar kind: Kind of sql server. This is metadata used for the Azure portal
experience.
:vartype kind: str
:param administrator_login: Administrator username for the server. Once
created it cannot be changed.
:type administrator_login: str
:param administrator_login_password: The administrator login password
(required for server creation).
:type administrator_login_password: str
:param version: The version of the server.
:type version: str
:ivar state: The state of the server.
:vartype state: str
:ivar fully_qualified_domain_name: The fully qualified domain name of the
server.
:vartype fully_qualified_domain_name: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'kind': {'readonly': True},
'state': {'readonly': True},
'fully_qualified_domain_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'administrator_login': {'key': 'properties.administratorLogin', 'type': 'str'},
'administrator_login_password': {'key': 'properties.administratorLoginPassword', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'fully_qualified_domain_name': {'key': 'properties.fullyQualifiedDomainName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Server, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.kind = None
self.administrator_login = kwargs.get('administrator_login', None)
self.administrator_login_password = kwargs.get('administrator_login_password', None)
self.version = kwargs.get('version', None)
self.state = None
self.fully_qualified_domain_name = None
| 39.770115 | 104 | 0.619942 |
from .tracked_resource import TrackedResource
class Server(TrackedResource):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'kind': {'readonly': True},
'state': {'readonly': True},
'fully_qualified_domain_name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'administrator_login': {'key': 'properties.administratorLogin', 'type': 'str'},
'administrator_login_password': {'key': 'properties.administratorLoginPassword', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'fully_qualified_domain_name': {'key': 'properties.fullyQualifiedDomainName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Server, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.kind = None
self.administrator_login = kwargs.get('administrator_login', None)
self.administrator_login_password = kwargs.get('administrator_login_password', None)
self.version = kwargs.get('version', None)
self.state = None
self.fully_qualified_domain_name = None
| true | true |
1c399ff53065a5193a230224651211ff389cc86d | 1,171 | py | Python | TUI/Inst/BOSS/TestData.py | sdss/snafui | 0793b036122755396f06f449080d9cdad7d508ec | [
"BSD-3-Clause"
] | 1 | 2018-03-07T02:47:36.000Z | 2018-03-07T02:47:36.000Z | TUI/Inst/BOSS/TestData.py | sdss/snafui | 0793b036122755396f06f449080d9cdad7d508ec | [
"BSD-3-Clause"
] | null | null | null | TUI/Inst/BOSS/TestData.py | sdss/snafui | 0793b036122755396f06f449080d9cdad7d508ec | [
"BSD-3-Clause"
] | null | null | null | import TUI.Base.TestDispatcher
testDispatcher = TUI.Base.TestDispatcher.TestDispatcher("boss", delay=1.0)
tuiModel = testDispatcher.tuiModel
ExposeMainDataList = (
"exposureState=IDLE, 0, 0",
"hardwareStatus=0x38",
"shutterStatus=0x1, 0x1",
"screenStatus=0x5, 0x5",
"motorPosition=5000, 4800, 5200, 3500, 3600, 3700",
"motorStatus=0x1, 0x1, 0x1, 0x1, 0x1, 0x1",
)
ExposeAnimDataSet = (
(
"shutterStatus=0x0, 0x1",
"exposureState=INTEGRATING, 4, 0",
),
(
"shutterStatus=0x0, 0x0",
"screenStatus=0x1, 0x4",
),
(
"shutterStatus=0x2, 0x0",
"screenStatus=0x9, 0x6",
),
(
"shutterStatus=0x2, 0x2",
"screenStatus=0x0, 0x0",
),
(
"shutterStatus=0x2, 0x0",
"screenStatus=0x6, 0x9",
),
(
"shutterStatus=0x0, 0x0",
"screenStatus=0x4, 0x1",
),
(
"shutterStatus=0x0, 0x1",
"screenStatus=0x5, 0x5",
),
(
"shutterStatus=0x3, 0x1",
),
(
"shutterStatus=0x1, 0x1",
),
)
def exposeStart():
    """Dispatch the initial BOSS exposure/status keywords via the test dispatcher."""
    testDispatcher.dispatch(ExposeMainDataList)
def exposeAnimate(dataIter=None):
    """Play back the animated shutter/screen data set.

    dataIter is accepted for interface compatibility but is not used here.
    """
    testDispatcher.runDataSet(ExposeAnimDataSet)
| 20.54386 | 74 | 0.614859 | import TUI.Base.TestDispatcher
testDispatcher = TUI.Base.TestDispatcher.TestDispatcher("boss", delay=1.0)
tuiModel = testDispatcher.tuiModel
ExposeMainDataList = (
"exposureState=IDLE, 0, 0",
"hardwareStatus=0x38",
"shutterStatus=0x1, 0x1",
"screenStatus=0x5, 0x5",
"motorPosition=5000, 4800, 5200, 3500, 3600, 3700",
"motorStatus=0x1, 0x1, 0x1, 0x1, 0x1, 0x1",
)
ExposeAnimDataSet = (
(
"shutterStatus=0x0, 0x1",
"exposureState=INTEGRATING, 4, 0",
),
(
"shutterStatus=0x0, 0x0",
"screenStatus=0x1, 0x4",
),
(
"shutterStatus=0x2, 0x0",
"screenStatus=0x9, 0x6",
),
(
"shutterStatus=0x2, 0x2",
"screenStatus=0x0, 0x0",
),
(
"shutterStatus=0x2, 0x0",
"screenStatus=0x6, 0x9",
),
(
"shutterStatus=0x0, 0x0",
"screenStatus=0x4, 0x1",
),
(
"shutterStatus=0x0, 0x1",
"screenStatus=0x5, 0x5",
),
(
"shutterStatus=0x3, 0x1",
),
(
"shutterStatus=0x1, 0x1",
),
)
def exposeStart():
testDispatcher.dispatch(ExposeMainDataList)
def exposeAnimate(dataIter=None):
testDispatcher.runDataSet(ExposeAnimDataSet)
| true | true |
1c39a02fd23ef778076844212fc5039acb5837e0 | 1,902 | py | Python | mil_common/utils/mil_tools/mil_misc_tools/serial_tools.py | RishiKumarRay/mil | f3746a91e68aac713e86b380cdda8852ba826170 | [
"MIT"
] | 27 | 2020-02-17T21:54:09.000Z | 2022-03-18T17:49:23.000Z | mil_common/utils/mil_tools/mil_misc_tools/serial_tools.py | RishiKumarRay/mil | f3746a91e68aac713e86b380cdda8852ba826170 | [
"MIT"
] | 325 | 2019-09-11T14:13:56.000Z | 2022-03-31T00:38:30.000Z | mil_common/utils/mil_tools/mil_misc_tools/serial_tools.py | RishiKumarRay/mil | f3746a91e68aac713e86b380cdda8852ba826170 | [
"MIT"
] | 24 | 2019-09-16T00:29:45.000Z | 2022-03-06T10:56:38.000Z | #!/usr/bin/env python
import serial
def hexify(buff):
    """Return the bytes of *buff* as colon-separated lowercase hex.

    Accepts either ``bytes`` or ``str`` and works on Python 2 and 3;
    the original ``b.encode('hex')`` idiom was Python-2-only.

    example: hexify(my_packet) -> 'c0:14:09:48:45:4c:4c:4f:c1'
    """
    # Iterating bytes yields ints on Python 3 and 1-char strings on Python 2;
    # normalize each element to its integer byte value before formatting.
    return ':'.join(
        '{:02x}'.format(b if isinstance(b, int) else ord(b)) for b in buff
    )
class NoopSerial(serial.Serial):
    """serial.Serial stand-in whose every operation is a no-op.

    Subclasses can override selected methods to simulate serial devices
    without ever touching real hardware.
    """
    port = 'noop-serial'

    def __init__(self, *args, **kwargs):
        # Deliberately skip serial.Serial.__init__ so no real port is opened.
        # (The original omitted `self`; it only worked because *args
        # happened to swallow the instance.)
        pass

    def open(self):
        pass

    @property
    def in_waiting(self):
        # Nothing is ever buffered on a no-op port.
        return 0

    @property
    def out_waiting(self):
        return 0

    def close(self):
        pass

    def __del__(self):
        pass

    def read(self, size=1, **kwargs):
        # Accept pyserial's positional `size` argument; still a no-op that
        # returns None, like the original implementation.
        pass

    def write(self, *args):
        pass

    def flush(self):
        pass

    def flushInput(self):
        pass

    def flushOuput(self):
        # NOTE: misspelled name preserved for backward compatibility with
        # existing callers; prefer flushOutput() below.
        pass

    def flushOutput(self):
        pass

    def reset_input_buffer(self):
        pass

    def reset_output_buffer(self):
        pass

    def send_break(self, *args, **kwargs):
        pass
class SimulatedSerial(NoopSerial):
    """Base class for simulated serial devices.

    Keeps an in-memory read buffer that behaves like an OS serial device:
    subclasses override write() to react to outgoing data and append to
    ``self.buffer`` whatever the simulated device would send back, which
    callers then consume through read().

    Note: NoopSerial and SimulatedSerial are generic and are candidates
    for mil_common.
    """

    def __init__(self, *args, **kwargs):
        self.buffer = ''

    @property
    def in_waiting(self):
        """Number of characters currently available to read."""
        return len(self.buffer)

    def reset_input_buffer(self):
        """Discard any pending simulated input."""
        self.buffer = ''

    def read(self, length):
        """Consume and return up to ``length`` characters from the buffer."""
        data = self.buffer[:length]
        self.buffer = self.buffer[length:]
        return data
| 21.133333 | 107 | 0.627234 |
import serial
def hexify(buff):
return ':'.join(b.encode('hex') for b in buff)
class NoopSerial(serial.Serial):
port = 'noop-serial'
def __init__(*args, **kwargs):
pass
def open(self):
pass
@property
def in_waiting(self):
return 0
@property
def out_waiting(self):
return 0
def close(self):
pass
def __del__(self):
pass
def read(self, **kwargs):
pass
def write(self, *args):
pass
def flush(self):
pass
def flushInput(self):
pass
def flushOuput(self):
pass
def reset_input_buffer(self):
pass
def reset_output_buffer(self):
pass
def send_break(self, *args, **kwargs):
pass
class SimulatedSerial(NoopSerial):
def __init__(self, *args, **kwargs):
self.buffer = ''
@property
def in_waiting(self):
return len(self.buffer)
def reset_input_buffer(self):
self.buffer = ''
def read(self, length):
b, self.buffer = self.buffer[0:length], self.buffer[length:]
return b
| true | true |
1c39a0fcb46e2f61334de27052d82344b2c58d05 | 1,983 | py | Python | longest_common_subsequence_print_alt.py | tusharsadhwani/leetcode | a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8 | [
"MIT"
] | 6 | 2021-05-21T01:10:42.000Z | 2021-12-16T16:12:30.000Z | longest_common_subsequence_print_alt.py | tusharsadhwani/leetcode | a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8 | [
"MIT"
] | null | null | null | longest_common_subsequence_print_alt.py | tusharsadhwani/leetcode | a17a8a7587c5654f05fcd13ae7cdf47263ab2ea8 | [
"MIT"
] | null | null | null | from collections import defaultdict
# Printing the largest subsequence using the alt-Solution, without storing strings
class Solution:
    def longestCommonSubsequence(self, text1: str, text2: str) -> str:
        """Return one longest common subsequence of ``text1`` and ``text2``.

        Classic O(m*n) dynamic programming: fill a table of LCS lengths,
        then walk it backwards to reconstruct the subsequence, so no
        strings are ever stored in the table itself.
        """
        m, n = len(text1), len(text2)
        # lengths[i][j] == LCS length of text1[:i] and text2[:j]
        lengths = [[0] * (n + 1) for _ in range(m + 1)]
        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if text1[i - 1] == text2[j - 1]:
                    lengths[i][j] = lengths[i - 1][j - 1] + 1
                else:
                    lengths[i][j] = max(lengths[i][j - 1], lengths[i - 1][j])
        # Backtrack from the bottom-right corner: on a character match record
        # it and step diagonally; otherwise follow the strictly larger of the
        # two neighbouring cells (ties go toward the second string's index).
        pieces = []
        i, j = m, n
        while i > 0 and j > 0:
            if text1[i - 1] == text2[j - 1]:
                pieces.append(text1[i - 1])
                i -= 1
                j -= 1
            elif lengths[i - 1][j] > lengths[i][j - 1]:
                i -= 1
            else:
                j -= 1
        return ''.join(reversed(pieces))
tests = [
(
("abcde", "ace",),
"ace",
),
(
("abc", "abc",),
"abc",
),
(
("abc", "def",),
"",
),
(
("abcddrh", "abddghj",),
"abddh",
),
(
("opmtqvejqvudezchsloxizynabehqbyzknunobehkzqtkt",
"srwbovohkvqhwrwvizebsrszcxepqrenilmvadqxuncpwhe",),
"ovqvezcxeqnunh",
),
]
| 30.984375 | 94 | 0.520424 | from collections import defaultdict
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> str:
cache: defaultdict[int, defaultdict[int, int]] = defaultdict(lambda: defaultdict(int))
for index1, char1 in enumerate(text1, start=1):
for index2, char2 in enumerate(text2, start=1):
if char1 == char2:
cache[index1][index2] = 1 + cache[index1-1][index2-1]
else:
cache[index1][index2] = max(
cache[index1][index2-1],
cache[index1-1][index2],
)
answer_reverse = ''
index1, index2 = len(text1), len(text2)
while index1 > 0 and index2 > 0:
if text1[index1-1] == text2[index2-1]:
answer_reverse += text1[index1-1]
index1 -= 1
index2 -= 1
else:
if cache[index1-1][index2] > cache[index1][index2-1]:
index1 -= 1
else:
index2 -= 1
return answer_reverse[::-1]
tests = [
(
("abcde", "ace",),
"ace",
),
(
("abc", "abc",),
"abc",
),
(
("abc", "def",),
"",
),
(
("abcddrh", "abddghj",),
"abddh",
),
(
("opmtqvejqvudezchsloxizynabehqbyzknunobehkzqtkt",
"srwbovohkvqhwrwvizebsrszcxepqrenilmvadqxuncpwhe",),
"ovqvezcxeqnunh",
),
]
| true | true |
1c39a128187cb933b60c03ad0668631ab0287fb1 | 16,795 | py | Python | cspace/main/ui/Ui_MainWindow.py | jmcvetta/cspace | 13d6be86dfa3417ac5511b1b2c15d1050fe3701d | [
"RSA-MD",
"TCP-wrappers"
] | 28 | 2015-04-24T12:04:08.000Z | 2022-01-07T15:13:10.000Z | cspace/main/ui/Ui_MainWindow.py | Magnus167/cspace | 13d6be86dfa3417ac5511b1b2c15d1050fe3701d | [
"RSA-MD",
"TCP-wrappers"
] | 1 | 2015-12-09T10:21:59.000Z | 2015-12-16T05:38:55.000Z | cspace/main/ui/Ui_MainWindow.py | Magnus167/cspace | 13d6be86dfa3417ac5511b1b2c15d1050fe3701d | [
"RSA-MD",
"TCP-wrappers"
] | 18 | 2015-01-01T02:16:49.000Z | 2021-08-12T04:14:57.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created: Mon Oct 09 13:21:16 2006
# by: PyQt4 UI code generator 4.0.1
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(QtCore.QSize(QtCore.QRect(0,0,283,376).size()).expandedTo(MainWindow.minimumSizeHint()))
MainWindow.setWindowIcon(QtGui.QIcon(":/images/cspace32.png"))
MainWindow.setIconSize(QtCore.QSize(24,24))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.vboxlayout = QtGui.QVBoxLayout(self.centralwidget)
self.vboxlayout.setMargin(0)
self.vboxlayout.setSpacing(0)
self.vboxlayout.setObjectName("vboxlayout")
self.stack = QtGui.QStackedWidget(self.centralwidget)
self.stack.setObjectName("stack")
self.contactsPage = QtGui.QWidget()
self.contactsPage.setObjectName("contactsPage")
self.vboxlayout1 = QtGui.QVBoxLayout(self.contactsPage)
self.vboxlayout1.setMargin(0)
self.vboxlayout1.setSpacing(0)
self.vboxlayout1.setObjectName("vboxlayout1")
self.contacts = QtGui.QListWidget(self.contactsPage)
self.contacts.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.contacts.setIconSize(QtCore.QSize(24,24))
self.contacts.setResizeMode(QtGui.QListView.Adjust)
self.contacts.setObjectName("contacts")
self.vboxlayout1.addWidget(self.contacts)
self.stack.addWidget(self.contactsPage)
self.offlinePage = QtGui.QWidget()
self.offlinePage.setObjectName("offlinePage")
self.vboxlayout2 = QtGui.QVBoxLayout(self.offlinePage)
self.vboxlayout2.setMargin(0)
self.vboxlayout2.setSpacing(0)
self.vboxlayout2.setObjectName("vboxlayout2")
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setMargin(0)
self.hboxlayout.setSpacing(6)
self.hboxlayout.setObjectName("hboxlayout")
spacerItem = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout.addItem(spacerItem)
self.goOnlineButton = QtGui.QPushButton(self.offlinePage)
self.goOnlineButton.setObjectName("goOnlineButton")
self.hboxlayout.addWidget(self.goOnlineButton)
spacerItem1 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout.addItem(spacerItem1)
self.vboxlayout2.addLayout(self.hboxlayout)
self.hboxlayout1 = QtGui.QHBoxLayout()
self.hboxlayout1.setMargin(0)
self.hboxlayout1.setSpacing(6)
self.hboxlayout1.setObjectName("hboxlayout1")
spacerItem2 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout1.addItem(spacerItem2)
self.createKeyButton = QtGui.QPushButton(self.offlinePage)
self.createKeyButton.setObjectName("createKeyButton")
self.hboxlayout1.addWidget(self.createKeyButton)
spacerItem3 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout1.addItem(spacerItem3)
self.vboxlayout2.addLayout(self.hboxlayout1)
self.stack.addWidget(self.offlinePage)
self.offlineNoUsersPage = QtGui.QWidget()
self.offlineNoUsersPage.setObjectName("offlineNoUsersPage")
self.vboxlayout3 = QtGui.QVBoxLayout(self.offlineNoUsersPage)
self.vboxlayout3.setMargin(0)
self.vboxlayout3.setSpacing(0)
self.vboxlayout3.setObjectName("vboxlayout3")
self.hboxlayout2 = QtGui.QHBoxLayout()
self.hboxlayout2.setMargin(0)
self.hboxlayout2.setSpacing(0)
self.hboxlayout2.setObjectName("hboxlayout2")
spacerItem4 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout2.addItem(spacerItem4)
self.createKeyButton1 = QtGui.QPushButton(self.offlineNoUsersPage)
self.createKeyButton1.setObjectName("createKeyButton1")
self.hboxlayout2.addWidget(self.createKeyButton1)
spacerItem5 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout2.addItem(spacerItem5)
self.vboxlayout3.addLayout(self.hboxlayout2)
self.stack.addWidget(self.offlineNoUsersPage)
self.connectingPage = QtGui.QWidget()
self.connectingPage.setObjectName("connectingPage")
self.vboxlayout4 = QtGui.QVBoxLayout(self.connectingPage)
self.vboxlayout4.setMargin(9)
self.vboxlayout4.setSpacing(6)
self.vboxlayout4.setObjectName("vboxlayout4")
spacerItem6 = QtGui.QSpacerItem(20,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)
self.vboxlayout4.addItem(spacerItem6)
self.hboxlayout3 = QtGui.QHBoxLayout()
self.hboxlayout3.setMargin(0)
self.hboxlayout3.setSpacing(6)
self.hboxlayout3.setObjectName("hboxlayout3")
spacerItem7 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout3.addItem(spacerItem7)
self.vboxlayout5 = QtGui.QVBoxLayout()
self.vboxlayout5.setMargin(0)
self.vboxlayout5.setSpacing(6)
self.vboxlayout5.setObjectName("vboxlayout5")
self.connectStatus = QtGui.QLabel(self.connectingPage)
self.connectStatus.setAlignment(QtCore.Qt.AlignCenter)
self.connectStatus.setObjectName("connectStatus")
self.vboxlayout5.addWidget(self.connectStatus)
self.connectCancelButton = QtGui.QPushButton(self.connectingPage)
self.connectCancelButton.setObjectName("connectCancelButton")
self.vboxlayout5.addWidget(self.connectCancelButton)
self.hboxlayout3.addLayout(self.vboxlayout5)
spacerItem8 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout3.addItem(spacerItem8)
self.vboxlayout4.addLayout(self.hboxlayout3)
spacerItem9 = QtGui.QSpacerItem(20,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)
self.vboxlayout4.addItem(spacerItem9)
self.stack.addWidget(self.connectingPage)
self.vboxlayout.addWidget(self.stack)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0,0,283,21))
self.menubar.setObjectName("menubar")
self.menu_Help = QtGui.QMenu(self.menubar)
self.menu_Help.setObjectName("menu_Help")
self.menuC_ontacts = QtGui.QMenu(self.menubar)
self.menuC_ontacts.setObjectName("menuC_ontacts")
self.menu_CSpace = QtGui.QMenu(self.menubar)
self.menu_CSpace.setObjectName("menu_CSpace")
self.menuO_ptions = QtGui.QMenu(self.menubar)
self.menuO_ptions.setObjectName("menuO_ptions")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setOrientation(QtCore.Qt.Horizontal)
self.toolBar.setIconSize(QtCore.QSize(32,32))
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(self.toolBar)
self.actionCreateKey = QtGui.QAction(MainWindow)
self.actionCreateKey.setIcon(QtGui.QIcon(":/images/register32.png"))
self.actionCreateKey.setObjectName("actionCreateKey")
self.actionGoOnline = QtGui.QAction(MainWindow)
self.actionGoOnline.setIcon(QtGui.QIcon(":/images/connect32.png"))
self.actionGoOnline.setObjectName("actionGoOnline")
self.actionGoOffline = QtGui.QAction(MainWindow)
self.actionGoOffline.setIcon(QtGui.QIcon(":/images/disconnect32.png"))
self.actionGoOffline.setObjectName("actionGoOffline")
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setIcon(QtGui.QIcon(":/images/exit32.png"))
self.actionExit.setObjectName("actionExit")
self.actionAddContact = QtGui.QAction(MainWindow)
self.actionAddContact.setIcon(QtGui.QIcon(":/images/user_add32.png"))
self.actionAddContact.setObjectName("actionAddContact")
self.actionRefreshStatus = QtGui.QAction(MainWindow)
self.actionRefreshStatus.setIcon(QtGui.QIcon(":/images/refresh32.png"))
self.actionRefreshStatus.setObjectName("actionRefreshStatus")
self.actionCheckStatus = QtGui.QAction(MainWindow)
self.actionCheckStatus.setIcon(QtGui.QIcon(":/images/refresh32.png"))
self.actionCheckStatus.setObjectName("actionCheckStatus")
self.actionContactInfo = QtGui.QAction(MainWindow)
self.actionContactInfo.setIcon(QtGui.QIcon(":/images/contact_info32.png"))
self.actionContactInfo.setObjectName("actionContactInfo")
self.actionRemoveContact = QtGui.QAction(MainWindow)
self.actionRemoveContact.setIcon(QtGui.QIcon(":/images/user_remove32.png"))
self.actionRemoveContact.setObjectName("actionRemoveContact")
self.actionEditPermissions = QtGui.QAction(MainWindow)
self.actionEditPermissions.setIcon(QtGui.QIcon(":/images/edit_permissions32.png"))
self.actionEditPermissions.setObjectName("actionEditPermissions")
self.actionAboutCSpace = QtGui.QAction(MainWindow)
self.actionAboutCSpace.setIcon(QtGui.QIcon(":/images/cspace32.png"))
self.actionAboutCSpace.setObjectName("actionAboutCSpace")
self.actionKeyInfo = QtGui.QAction(MainWindow)
self.actionKeyInfo.setIcon(QtGui.QIcon(":/images/key_info32.png"))
self.actionKeyInfo.setObjectName("actionKeyInfo")
self.menu_Help.addAction(self.actionAboutCSpace)
self.menuC_ontacts.addAction(self.actionAddContact)
self.menuC_ontacts.addAction(self.actionRefreshStatus)
self.menu_CSpace.addAction(self.actionGoOnline)
self.menu_CSpace.addAction(self.actionGoOffline)
self.menu_CSpace.addAction(self.actionKeyInfo)
self.menu_CSpace.addSeparator()
self.menu_CSpace.addAction(self.actionCreateKey)
self.menu_CSpace.addSeparator()
self.menu_CSpace.addAction(self.actionExit)
self.menuO_ptions.addAction(self.actionEditPermissions)
self.menubar.addAction(self.menu_CSpace.menuAction())
self.menubar.addAction(self.menuC_ontacts.menuAction())
self.menubar.addAction(self.menuO_ptions.menuAction())
self.menubar.addAction(self.menu_Help.menuAction())
self.toolBar.addAction(self.actionGoOnline)
self.toolBar.addAction(self.actionCreateKey)
self.toolBar.addAction(self.actionGoOffline)
self.toolBar.addAction(self.actionExit)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionAddContact)
self.toolBar.addAction(self.actionRefreshStatus)
self.retranslateUi(MainWindow)
self.stack.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Assign all user-visible strings (pyuic4-generated from the .ui file).

    Called from setupUi; in Qt's retranslate pattern it can be re-invoked
    when the application language changes, so every translatable string is
    (re)set here rather than in setupUi.
    """
    MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "CSpace", None, QtGui.QApplication.UnicodeUTF8))
    # Placeholder contact-list entries: design-time preview data carried over
    # from Qt Designer ("Item 1".."Item 4" with online/offline icons).
    self.contacts.clear()
    item = QtGui.QListWidgetItem(self.contacts)
    item.setText(QtGui.QApplication.translate("MainWindow", "Item 1", None, QtGui.QApplication.UnicodeUTF8))
    item.setIcon(QtGui.QIcon(":/images/user_online.png"))
    item1 = QtGui.QListWidgetItem(self.contacts)
    item1.setText(QtGui.QApplication.translate("MainWindow", "Item 2", None, QtGui.QApplication.UnicodeUTF8))
    item1.setIcon(QtGui.QIcon(":/images/user_offline.png"))
    item2 = QtGui.QListWidgetItem(self.contacts)
    item2.setText(QtGui.QApplication.translate("MainWindow", "Item 3", None, QtGui.QApplication.UnicodeUTF8))
    item2.setIcon(QtGui.QIcon(":/images/user_online.png"))
    item3 = QtGui.QListWidgetItem(self.contacts)
    item3.setText(QtGui.QApplication.translate("MainWindow", "Item 4", None, QtGui.QApplication.UnicodeUTF8))
    item3.setIcon(QtGui.QIcon(":/images/user_offline.png"))
    # Push-button captions on the stacked offline pages.
    self.goOnlineButton.setText(QtGui.QApplication.translate("MainWindow", "Go Online...", None, QtGui.QApplication.UnicodeUTF8))
    self.createKeyButton.setText(QtGui.QApplication.translate("MainWindow", "Create Private Key...", None, QtGui.QApplication.UnicodeUTF8))
    self.createKeyButton1.setText(QtGui.QApplication.translate("MainWindow", "Create Private Key...", None, QtGui.QApplication.UnicodeUTF8))
    # Rich-text status label shown on the "connecting" page.
    self.connectStatus.setText(QtGui.QApplication.translate("MainWindow", "<html><head><meta name=\"qrichtext\" content=\"1\" /></head><body style=\" white-space: pre-wrap; font-family:MS Shell Dlg; font-size:8.25pt; font-weight:400; font-style:normal; text-decoration:none;\"><p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><span style=\" font-weight:600;\">Connect failed.</span></p><p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt; font-weight:600;\">Reconnecting in 30 second(s)...</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
    self.connectCancelButton.setText(QtGui.QApplication.translate("MainWindow", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
    # Menu titles ("&" marks the keyboard mnemonic).
    self.menu_Help.setTitle(QtGui.QApplication.translate("MainWindow", "&Help", None, QtGui.QApplication.UnicodeUTF8))
    self.menuC_ontacts.setTitle(QtGui.QApplication.translate("MainWindow", "C&ontacts", None, QtGui.QApplication.UnicodeUTF8))
    self.menu_CSpace.setTitle(QtGui.QApplication.translate("MainWindow", "&CSpace", None, QtGui.QApplication.UnicodeUTF8))
    self.menuO_ptions.setTitle(QtGui.QApplication.translate("MainWindow", "O&ptions", None, QtGui.QApplication.UnicodeUTF8))
    # Action texts; setIconText/setToolTip give the shorter toolbar variants.
    self.actionCreateKey.setText(QtGui.QApplication.translate("MainWindow", "&Create Private Key...", None, QtGui.QApplication.UnicodeUTF8))
    self.actionCreateKey.setIconText(QtGui.QApplication.translate("MainWindow", "Create Private Key", None, QtGui.QApplication.UnicodeUTF8))
    self.actionCreateKey.setToolTip(QtGui.QApplication.translate("MainWindow", "Create Private Key", None, QtGui.QApplication.UnicodeUTF8))
    self.actionGoOnline.setText(QtGui.QApplication.translate("MainWindow", "&Go Online...", None, QtGui.QApplication.UnicodeUTF8))
    self.actionGoOffline.setText(QtGui.QApplication.translate("MainWindow", "Go &Offline", None, QtGui.QApplication.UnicodeUTF8))
    self.actionExit.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
    self.actionAddContact.setText(QtGui.QApplication.translate("MainWindow", "&Add Contact...", None, QtGui.QApplication.UnicodeUTF8))
    self.actionAddContact.setIconText(QtGui.QApplication.translate("MainWindow", "Add Contact", None, QtGui.QApplication.UnicodeUTF8))
    self.actionAddContact.setToolTip(QtGui.QApplication.translate("MainWindow", "Add Contact", None, QtGui.QApplication.UnicodeUTF8))
    self.actionRefreshStatus.setText(QtGui.QApplication.translate("MainWindow", "Refresh &Status", None, QtGui.QApplication.UnicodeUTF8))
    self.actionCheckStatus.setText(QtGui.QApplication.translate("MainWindow", "&Check Status", None, QtGui.QApplication.UnicodeUTF8))
    self.actionContactInfo.setText(QtGui.QApplication.translate("MainWindow", "Contact &Information...", None, QtGui.QApplication.UnicodeUTF8))
    self.actionRemoveContact.setText(QtGui.QApplication.translate("MainWindow", "Remove Contact", None, QtGui.QApplication.UnicodeUTF8))
    self.actionEditPermissions.setText(QtGui.QApplication.translate("MainWindow", "&Edit Permissions...", None, QtGui.QApplication.UnicodeUTF8))
    self.actionAboutCSpace.setText(QtGui.QApplication.translate("MainWindow", "&About CSpace...", None, QtGui.QApplication.UnicodeUTF8))
    self.actionKeyInfo.setText(QtGui.QApplication.translate("MainWindow", "Key Information...", None, QtGui.QApplication.UnicodeUTF8))
| 55.612583 | 721 | 0.718488 |
import sys
from PyQt4 import QtCore, QtGui
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(QtCore.QSize(QtCore.QRect(0,0,283,376).size()).expandedTo(MainWindow.minimumSizeHint()))
MainWindow.setWindowIcon(QtGui.QIcon(":/images/cspace32.png"))
MainWindow.setIconSize(QtCore.QSize(24,24))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.vboxlayout = QtGui.QVBoxLayout(self.centralwidget)
self.vboxlayout.setMargin(0)
self.vboxlayout.setSpacing(0)
self.vboxlayout.setObjectName("vboxlayout")
self.stack = QtGui.QStackedWidget(self.centralwidget)
self.stack.setObjectName("stack")
self.contactsPage = QtGui.QWidget()
self.contactsPage.setObjectName("contactsPage")
self.vboxlayout1 = QtGui.QVBoxLayout(self.contactsPage)
self.vboxlayout1.setMargin(0)
self.vboxlayout1.setSpacing(0)
self.vboxlayout1.setObjectName("vboxlayout1")
self.contacts = QtGui.QListWidget(self.contactsPage)
self.contacts.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.contacts.setIconSize(QtCore.QSize(24,24))
self.contacts.setResizeMode(QtGui.QListView.Adjust)
self.contacts.setObjectName("contacts")
self.vboxlayout1.addWidget(self.contacts)
self.stack.addWidget(self.contactsPage)
self.offlinePage = QtGui.QWidget()
self.offlinePage.setObjectName("offlinePage")
self.vboxlayout2 = QtGui.QVBoxLayout(self.offlinePage)
self.vboxlayout2.setMargin(0)
self.vboxlayout2.setSpacing(0)
self.vboxlayout2.setObjectName("vboxlayout2")
self.hboxlayout = QtGui.QHBoxLayout()
self.hboxlayout.setMargin(0)
self.hboxlayout.setSpacing(6)
self.hboxlayout.setObjectName("hboxlayout")
spacerItem = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout.addItem(spacerItem)
self.goOnlineButton = QtGui.QPushButton(self.offlinePage)
self.goOnlineButton.setObjectName("goOnlineButton")
self.hboxlayout.addWidget(self.goOnlineButton)
spacerItem1 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout.addItem(spacerItem1)
self.vboxlayout2.addLayout(self.hboxlayout)
self.hboxlayout1 = QtGui.QHBoxLayout()
self.hboxlayout1.setMargin(0)
self.hboxlayout1.setSpacing(6)
self.hboxlayout1.setObjectName("hboxlayout1")
spacerItem2 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout1.addItem(spacerItem2)
self.createKeyButton = QtGui.QPushButton(self.offlinePage)
self.createKeyButton.setObjectName("createKeyButton")
self.hboxlayout1.addWidget(self.createKeyButton)
spacerItem3 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout1.addItem(spacerItem3)
self.vboxlayout2.addLayout(self.hboxlayout1)
self.stack.addWidget(self.offlinePage)
self.offlineNoUsersPage = QtGui.QWidget()
self.offlineNoUsersPage.setObjectName("offlineNoUsersPage")
self.vboxlayout3 = QtGui.QVBoxLayout(self.offlineNoUsersPage)
self.vboxlayout3.setMargin(0)
self.vboxlayout3.setSpacing(0)
self.vboxlayout3.setObjectName("vboxlayout3")
self.hboxlayout2 = QtGui.QHBoxLayout()
self.hboxlayout2.setMargin(0)
self.hboxlayout2.setSpacing(0)
self.hboxlayout2.setObjectName("hboxlayout2")
spacerItem4 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout2.addItem(spacerItem4)
self.createKeyButton1 = QtGui.QPushButton(self.offlineNoUsersPage)
self.createKeyButton1.setObjectName("createKeyButton1")
self.hboxlayout2.addWidget(self.createKeyButton1)
spacerItem5 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout2.addItem(spacerItem5)
self.vboxlayout3.addLayout(self.hboxlayout2)
self.stack.addWidget(self.offlineNoUsersPage)
self.connectingPage = QtGui.QWidget()
self.connectingPage.setObjectName("connectingPage")
self.vboxlayout4 = QtGui.QVBoxLayout(self.connectingPage)
self.vboxlayout4.setMargin(9)
self.vboxlayout4.setSpacing(6)
self.vboxlayout4.setObjectName("vboxlayout4")
spacerItem6 = QtGui.QSpacerItem(20,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)
self.vboxlayout4.addItem(spacerItem6)
self.hboxlayout3 = QtGui.QHBoxLayout()
self.hboxlayout3.setMargin(0)
self.hboxlayout3.setSpacing(6)
self.hboxlayout3.setObjectName("hboxlayout3")
spacerItem7 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout3.addItem(spacerItem7)
self.vboxlayout5 = QtGui.QVBoxLayout()
self.vboxlayout5.setMargin(0)
self.vboxlayout5.setSpacing(6)
self.vboxlayout5.setObjectName("vboxlayout5")
self.connectStatus = QtGui.QLabel(self.connectingPage)
self.connectStatus.setAlignment(QtCore.Qt.AlignCenter)
self.connectStatus.setObjectName("connectStatus")
self.vboxlayout5.addWidget(self.connectStatus)
self.connectCancelButton = QtGui.QPushButton(self.connectingPage)
self.connectCancelButton.setObjectName("connectCancelButton")
self.vboxlayout5.addWidget(self.connectCancelButton)
self.hboxlayout3.addLayout(self.vboxlayout5)
spacerItem8 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.hboxlayout3.addItem(spacerItem8)
self.vboxlayout4.addLayout(self.hboxlayout3)
spacerItem9 = QtGui.QSpacerItem(20,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)
self.vboxlayout4.addItem(spacerItem9)
self.stack.addWidget(self.connectingPage)
self.vboxlayout.addWidget(self.stack)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0,0,283,21))
self.menubar.setObjectName("menubar")
self.menu_Help = QtGui.QMenu(self.menubar)
self.menu_Help.setObjectName("menu_Help")
self.menuC_ontacts = QtGui.QMenu(self.menubar)
self.menuC_ontacts.setObjectName("menuC_ontacts")
self.menu_CSpace = QtGui.QMenu(self.menubar)
self.menu_CSpace.setObjectName("menu_CSpace")
self.menuO_ptions = QtGui.QMenu(self.menubar)
self.menuO_ptions.setObjectName("menuO_ptions")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setOrientation(QtCore.Qt.Horizontal)
self.toolBar.setIconSize(QtCore.QSize(32,32))
self.toolBar.setObjectName("toolBar")
MainWindow.addToolBar(self.toolBar)
self.actionCreateKey = QtGui.QAction(MainWindow)
self.actionCreateKey.setIcon(QtGui.QIcon(":/images/register32.png"))
self.actionCreateKey.setObjectName("actionCreateKey")
self.actionGoOnline = QtGui.QAction(MainWindow)
self.actionGoOnline.setIcon(QtGui.QIcon(":/images/connect32.png"))
self.actionGoOnline.setObjectName("actionGoOnline")
self.actionGoOffline = QtGui.QAction(MainWindow)
self.actionGoOffline.setIcon(QtGui.QIcon(":/images/disconnect32.png"))
self.actionGoOffline.setObjectName("actionGoOffline")
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setIcon(QtGui.QIcon(":/images/exit32.png"))
self.actionExit.setObjectName("actionExit")
self.actionAddContact = QtGui.QAction(MainWindow)
self.actionAddContact.setIcon(QtGui.QIcon(":/images/user_add32.png"))
self.actionAddContact.setObjectName("actionAddContact")
self.actionRefreshStatus = QtGui.QAction(MainWindow)
self.actionRefreshStatus.setIcon(QtGui.QIcon(":/images/refresh32.png"))
self.actionRefreshStatus.setObjectName("actionRefreshStatus")
self.actionCheckStatus = QtGui.QAction(MainWindow)
self.actionCheckStatus.setIcon(QtGui.QIcon(":/images/refresh32.png"))
self.actionCheckStatus.setObjectName("actionCheckStatus")
self.actionContactInfo = QtGui.QAction(MainWindow)
self.actionContactInfo.setIcon(QtGui.QIcon(":/images/contact_info32.png"))
self.actionContactInfo.setObjectName("actionContactInfo")
self.actionRemoveContact = QtGui.QAction(MainWindow)
self.actionRemoveContact.setIcon(QtGui.QIcon(":/images/user_remove32.png"))
self.actionRemoveContact.setObjectName("actionRemoveContact")
self.actionEditPermissions = QtGui.QAction(MainWindow)
self.actionEditPermissions.setIcon(QtGui.QIcon(":/images/edit_permissions32.png"))
self.actionEditPermissions.setObjectName("actionEditPermissions")
self.actionAboutCSpace = QtGui.QAction(MainWindow)
self.actionAboutCSpace.setIcon(QtGui.QIcon(":/images/cspace32.png"))
self.actionAboutCSpace.setObjectName("actionAboutCSpace")
self.actionKeyInfo = QtGui.QAction(MainWindow)
self.actionKeyInfo.setIcon(QtGui.QIcon(":/images/key_info32.png"))
self.actionKeyInfo.setObjectName("actionKeyInfo")
self.menu_Help.addAction(self.actionAboutCSpace)
self.menuC_ontacts.addAction(self.actionAddContact)
self.menuC_ontacts.addAction(self.actionRefreshStatus)
self.menu_CSpace.addAction(self.actionGoOnline)
self.menu_CSpace.addAction(self.actionGoOffline)
self.menu_CSpace.addAction(self.actionKeyInfo)
self.menu_CSpace.addSeparator()
self.menu_CSpace.addAction(self.actionCreateKey)
self.menu_CSpace.addSeparator()
self.menu_CSpace.addAction(self.actionExit)
self.menuO_ptions.addAction(self.actionEditPermissions)
self.menubar.addAction(self.menu_CSpace.menuAction())
self.menubar.addAction(self.menuC_ontacts.menuAction())
self.menubar.addAction(self.menuO_ptions.menuAction())
self.menubar.addAction(self.menu_Help.menuAction())
self.toolBar.addAction(self.actionGoOnline)
self.toolBar.addAction(self.actionCreateKey)
self.toolBar.addAction(self.actionGoOffline)
self.toolBar.addAction(self.actionExit)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionAddContact)
self.toolBar.addAction(self.actionRefreshStatus)
self.retranslateUi(MainWindow)
self.stack.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "CSpace", None, QtGui.QApplication.UnicodeUTF8))
self.contacts.clear()
item = QtGui.QListWidgetItem(self.contacts)
item.setText(QtGui.QApplication.translate("MainWindow", "Item 1", None, QtGui.QApplication.UnicodeUTF8))
item.setIcon(QtGui.QIcon(":/images/user_online.png"))
item1 = QtGui.QListWidgetItem(self.contacts)
item1.setText(QtGui.QApplication.translate("MainWindow", "Item 2", None, QtGui.QApplication.UnicodeUTF8))
item1.setIcon(QtGui.QIcon(":/images/user_offline.png"))
item2 = QtGui.QListWidgetItem(self.contacts)
item2.setText(QtGui.QApplication.translate("MainWindow", "Item 3", None, QtGui.QApplication.UnicodeUTF8))
item2.setIcon(QtGui.QIcon(":/images/user_online.png"))
item3 = QtGui.QListWidgetItem(self.contacts)
item3.setText(QtGui.QApplication.translate("MainWindow", "Item 4", None, QtGui.QApplication.UnicodeUTF8))
item3.setIcon(QtGui.QIcon(":/images/user_offline.png"))
self.goOnlineButton.setText(QtGui.QApplication.translate("MainWindow", "Go Online...", None, QtGui.QApplication.UnicodeUTF8))
self.createKeyButton.setText(QtGui.QApplication.translate("MainWindow", "Create Private Key...", None, QtGui.QApplication.UnicodeUTF8))
self.createKeyButton1.setText(QtGui.QApplication.translate("MainWindow", "Create Private Key...", None, QtGui.QApplication.UnicodeUTF8))
self.connectStatus.setText(QtGui.QApplication.translate("MainWindow", "<html><head><meta name=\"qrichtext\" content=\"1\" /></head><body style=\" white-space: pre-wrap; font-family:MS Shell Dlg; font-size:8.25pt; font-weight:400; font-style:normal; text-decoration:none;\"><p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt;\"><span style=\" font-weight:600;\">Connect failed.</span></p><p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-size:8pt; font-weight:600;\">Reconnecting in 30 second(s)...</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.connectCancelButton.setText(QtGui.QApplication.translate("MainWindow", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
self.menu_Help.setTitle(QtGui.QApplication.translate("MainWindow", "&Help", None, QtGui.QApplication.UnicodeUTF8))
self.menuC_ontacts.setTitle(QtGui.QApplication.translate("MainWindow", "C&ontacts", None, QtGui.QApplication.UnicodeUTF8))
self.menu_CSpace.setTitle(QtGui.QApplication.translate("MainWindow", "&CSpace", None, QtGui.QApplication.UnicodeUTF8))
self.menuO_ptions.setTitle(QtGui.QApplication.translate("MainWindow", "O&ptions", None, QtGui.QApplication.UnicodeUTF8))
self.actionCreateKey.setText(QtGui.QApplication.translate("MainWindow", "&Create Private Key...", None, QtGui.QApplication.UnicodeUTF8))
self.actionCreateKey.setIconText(QtGui.QApplication.translate("MainWindow", "Create Private Key", None, QtGui.QApplication.UnicodeUTF8))
self.actionCreateKey.setToolTip(QtGui.QApplication.translate("MainWindow", "Create Private Key", None, QtGui.QApplication.UnicodeUTF8))
self.actionGoOnline.setText(QtGui.QApplication.translate("MainWindow", "&Go Online...", None, QtGui.QApplication.UnicodeUTF8))
self.actionGoOffline.setText(QtGui.QApplication.translate("MainWindow", "Go &Offline", None, QtGui.QApplication.UnicodeUTF8))
self.actionExit.setText(QtGui.QApplication.translate("MainWindow", "E&xit", None, QtGui.QApplication.UnicodeUTF8))
self.actionAddContact.setText(QtGui.QApplication.translate("MainWindow", "&Add Contact...", None, QtGui.QApplication.UnicodeUTF8))
self.actionAddContact.setIconText(QtGui.QApplication.translate("MainWindow", "Add Contact", None, QtGui.QApplication.UnicodeUTF8))
self.actionAddContact.setToolTip(QtGui.QApplication.translate("MainWindow", "Add Contact", None, QtGui.QApplication.UnicodeUTF8))
self.actionRefreshStatus.setText(QtGui.QApplication.translate("MainWindow", "Refresh &Status", None, QtGui.QApplication.UnicodeUTF8))
self.actionCheckStatus.setText(QtGui.QApplication.translate("MainWindow", "&Check Status", None, QtGui.QApplication.UnicodeUTF8))
self.actionContactInfo.setText(QtGui.QApplication.translate("MainWindow", "Contact &Information...", None, QtGui.QApplication.UnicodeUTF8))
self.actionRemoveContact.setText(QtGui.QApplication.translate("MainWindow", "Remove Contact", None, QtGui.QApplication.UnicodeUTF8))
self.actionEditPermissions.setText(QtGui.QApplication.translate("MainWindow", "&Edit Permissions...", None, QtGui.QApplication.UnicodeUTF8))
self.actionAboutCSpace.setText(QtGui.QApplication.translate("MainWindow", "&About CSpace...", None, QtGui.QApplication.UnicodeUTF8))
self.actionKeyInfo.setText(QtGui.QApplication.translate("MainWindow", "Key Information...", None, QtGui.QApplication.UnicodeUTF8))
| true | true |
1c39a329a1861e2a4b9086d9ce7bc0ccd9a2eb57 | 5,152 | py | Python | src/ebel_rest/manager/statistics.py | e-BE-L/ebel_rest | 67e62fe0d52eb9cd5dba333e2908e5746e4317e7 | [
"MIT"
] | 3 | 2020-04-22T11:16:43.000Z | 2021-09-28T13:49:18.000Z | src/ebel_rest/manager/statistics.py | e-BE-L/ebel_rest | 67e62fe0d52eb9cd5dba333e2908e5746e4317e7 | [
"MIT"
] | null | null | null | src/ebel_rest/manager/statistics.py | e-BE-L/ebel_rest | 67e62fe0d52eb9cd5dba333e2908e5746e4317e7 | [
"MIT"
] | null | null | null | from collections import Counter
import pandas as pd
from ebel_rest.manager.core import Statistics
from ebel_rest.manager import ss_functions
def summarize() -> Statistics:
    """Returns summary statistics on the graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_SUMMARIZE)


def publication_by_year() -> Statistics:
    """Returns statistics on the number of publications per year in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_PUBLICATION_BY_YEAR)


def publication_by_number_of_statements() -> Statistics:
    """Returns statistics on the number of statements per publication in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_PUBLICATION_BY_NUMBER_OF_STATEMENTS)


def last_author_by_number_of_publications() -> Statistics:
    """Returns statistics on the number of publications per author in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_LAST_AUTHOR_BY_NUMBER_OF_PUBLICATIONS)


# NOTE: the annotations below were previously written as "-> Statistics()",
# which *calls* the class at import time and makes the annotation an instance
# rather than a type. They are corrected to the class itself.
def last_author_by_number_of_statements() -> Statistics:
    """Returns statistics on the number of statements per author in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_LAST_AUTHOR_BY_NUMBER_OF_STATEMENTS)


def namespace_by_count() -> Statistics:
    """Returns the number of nodes for each namespace in KG."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_NAMESPACE_COUNT)


def node_namespace_order_by_count() -> Statistics:
    """Returns statistics on the frequency of each node type and each namespace in the knowledge graph
    in order of count."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_NODE_NAMESPACE_ORDER_BY_COUNT)


def node_namespace_order_by_namespace() -> Statistics:
    """Returns node/namespace frequency statistics ordered by namespace."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_NODE_NAMESPACE_ORDER_BY_NAMESPACE)


def edges() -> Statistics:
    """Returns statistics on the frequency of each edge type in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_EDGES)


def nodes() -> Statistics:
    """Returns statistics on the frequency of each node type in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_NODES)


def total_bel_nodes() -> Statistics:
    """Returns the total number of nodes generated from curated statements in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_TOTAL_BEL_NODES)


def total_bel_edges() -> Statistics:
    """Returns the total number of BEL curated edges in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_TOTAL_STATEMENTS)


def total_publications() -> Statistics:
    """Returns the total number of publications in the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_TOTAL_PUBLICATIONS)


def subgraphs() -> Statistics:
    """Returns subgraph statistics for the knowledge graph."""
    return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_SUBGRAPH)
# def edges_by_pmid(pivot: bool = False):
# """Returns statistics on the frequency of each edge type for each PMID in the knowledge graph.
#
# Parameters
# ----------
# pivot: bool
# Pivots the generated pandas DataFrame to split the 3 columns (PMID, edge_type, and edge counts) into a new
# DataFrame in which the rows are unique PMIDs and the columns are the different edge types present in the
# knowledge graph.
#
# If True, this method will return the new DataFrame instead of a Statistics object.
#
# Returns
# -------
# Statistics or pandas.DataFrame
# """
# stats = Statistics().apply_api_function('bel_statistics_edges_by_pmid')
# if pivot:
# return stats.table.pivot('pmid', 'edge_type', 'count').fillna(0).astype('int')
#
# else:
# return stats
#
#
# def nodes_by_pmid(pivot: bool = False):
# """Returns statistics on the frequency of each node type for each PMID in the knowledge graph.
#
# Parameters
# ----------
# pivot: bool
# Pivots the generated pandas DataFrame to split the 3 columns (PMID, Node Type, and node counts) into a new
# DataFrame in which the rows are unique PMIDs and the columns are the different edge types present in the
# knowledge graph.
#
# If True, this method will return the new DataFrame instead of a Statistics object.
#
# Returns
# -------
# Statistics or pandas.DataFrame
# """
# stats = Statistics().apply_api_function('bel_statistics_nodes_by_pmid')
# if pivot:
# data = {'pmid': [], 'Node Type': [], 'counts': []}
# for row in stats.table.itertuples():
# counts = Counter(row.nodes)
# data['pmid'] += (len(counts) * [row.pmid])
# data['Node Type'] += (counts.keys())
# data['counts'] += (counts.values())
#
# return pd.DataFrame(data).pivot('pmid', 'Node Type', 'counts').fillna(0).astype('int')
#
# else:
# return stats
| 39.328244 | 116 | 0.722826 | from collections import Counter
import pandas as pd
from ebel_rest.manager.core import Statistics
from ebel_rest.manager import ss_functions
def summarize() -> Statistics:
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_SUMMARIZE)
def publication_by_year() -> Statistics:
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_PUBLICATION_BY_YEAR)
def publication_by_number_of_statements() -> Statistics:
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_PUBLICATION_BY_NUMBER_OF_STATEMENTS)
def last_author_by_number_of_publications() -> Statistics:
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_LAST_AUTHOR_BY_NUMBER_OF_PUBLICATIONS)
def last_author_by_number_of_statements() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_LAST_AUTHOR_BY_NUMBER_OF_STATEMENTS)
def namespace_by_count() -> Statistics:
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_NAMESPACE_COUNT)
def node_namespace_order_by_count() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_NODE_NAMESPACE_ORDER_BY_COUNT)
def node_namespace_order_by_namespace() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_NODE_NAMESPACE_ORDER_BY_NAMESPACE)
def edges() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_EDGES)
def nodes() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_NODES)
def total_bel_nodes() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_TOTAL_BEL_NODES)
def total_bel_edges() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_TOTAL_STATEMENTS)
def total_publications() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_TOTAL_PUBLICATIONS)
def subgraphs() -> Statistics():
return Statistics().apply_api_function(ss_functions.BEL_STATISTICS_SUBGRAPH)
#
# Parameters
# ----------
# pivot: bool
# Pivots the generated pandas DataFrame to split the 3 columns (PMID, edge_type, and edge counts) into a new
# DataFrame in which the rows are unique PMIDs and the columns are the different edge types present in the
# knowledge graph.
#
# If True, this method will return the new DataFrame instead of a Statistics object.
#
# Returns
# -------
# Statistics or pandas.DataFrame
# """
#
# Parameters
# ----------
# pivot: bool
# Pivots the generated pandas DataFrame to split the 3 columns (PMID, Node Type, and node counts) into a new
# DataFrame in which the rows are unique PMIDs and the columns are the different edge types present in the
# knowledge graph.
#
# If True, this method will return the new DataFrame instead of a Statistics object.
#
# Returns
# -------
# Statistics or pandas.DataFrame
# """
| true | true |
1c39a57b6f5ec4a1121a1d80e025e2c081812e38 | 1,779 | py | Python | sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/models/network_interface_py3.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/models/network_interface_py3.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/models/network_interface_py3.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NetworkInterface(Model):
    """Network details of the environment.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar private_ip_address: PrivateIp address of the Compute VM
    :vartype private_ip_address: str
    :ivar ssh_authority: Connection information for Linux
    :vartype ssh_authority: str
    :ivar rdp_authority: Connection information for Windows
    :vartype rdp_authority: str
    :ivar username: Username of the VM
    :vartype username: str
    """

    # Every field is marked read-only, so msrest excludes them all from
    # serialized request bodies (they are response-only values).
    _validation = {
        'private_ip_address': {'readonly': True},
        'ssh_authority': {'readonly': True},
        'rdp_authority': {'readonly': True},
        'username': {'readonly': True},
    }

    # Maps Python attribute names to the wire (JSON) keys and msrest types.
    _attribute_map = {
        'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
        'ssh_authority': {'key': 'sshAuthority', 'type': 'str'},
        'rdp_authority': {'key': 'rdpAuthority', 'type': 'str'},
        'username': {'key': 'username', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super(NetworkInterface, self).__init__(**kwargs)
        # Read-only attributes: initialized to None and filled in by the
        # service on deserialization.
        self.private_ip_address = None
        self.ssh_authority = None
        self.rdp_authority = None
        self.username = None
| 34.882353 | 76 | 0.609893 |
from msrest.serialization import Model
class NetworkInterface(Model):
_validation = {
'private_ip_address': {'readonly': True},
'ssh_authority': {'readonly': True},
'rdp_authority': {'readonly': True},
'username': {'readonly': True},
}
_attribute_map = {
'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
'ssh_authority': {'key': 'sshAuthority', 'type': 'str'},
'rdp_authority': {'key': 'rdpAuthority', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(NetworkInterface, self).__init__(**kwargs)
self.private_ip_address = None
self.ssh_authority = None
self.rdp_authority = None
self.username = None
| true | true |
1c39a6a1da67e5ad4c9b2e539886f9b75b87bfe1 | 2,455 | py | Python | Chapter15/Using PostgreSQL in Python applications/sqlalchemy_orm.py | MLikeWater/Learning-PostgreSQL-11-Third-Edition | 6f4414c2a0edb1bc0e280ca4d1589b89b7205671 | [
"MIT"
] | 19 | 2019-01-30T14:12:27.000Z | 2021-10-05T19:45:42.000Z | Chapter15/Using PostgreSQL in Python applications/sqlalchemy_orm.py | PacktPublishing/Learning-PostgreSQL-11-Third-Edition | 134ad3efd3b1f3bd491daf1d145676e0977b9d8e | [
"MIT"
] | null | null | null | Chapter15/Using PostgreSQL in Python applications/sqlalchemy_orm.py | PacktPublishing/Learning-PostgreSQL-11-Third-Edition | 134ad3efd3b1f3bd491daf1d145676e0977b9d8e | [
"MIT"
] | 21 | 2018-11-20T11:44:50.000Z | 2022-01-19T21:19:38.000Z | #!/usr/bin/python3
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
engine = create_engine(
"postgresql+pg8000://car_portal_app@localhost/car_portal",
echo=False)
Base = declarative_base()
Session = sessionmaker(bind=engine)
class Car_model(Base):
    """ORM mapping for the car_portal_app.car_model lookup table (make/model pairs)."""
    __tablename__ = "car_model"
    __table_args__ = {'schema': 'car_portal_app'}
    car_model_id = Column(Integer, primary_key=True)
    make = Column(String)   # manufacturer name, e.g. "Jaguar"
    model = Column(String)  # model name, e.g. "XE"
class Car(Base):
    """ORM mapping for car_portal_app.car: one row per registered car."""

    __tablename__ = "car"
    __table_args__ = {'schema': 'car_portal_app'}

    car_id = Column(Integer, primary_key=True)
    number_of_owners = Column(Integer, nullable=False)
    registration_number = Column(String, nullable=False)
    manufacture_year = Column(Integer, nullable=False)
    number_of_doors = Column(Integer, nullable=False)
    car_model_id = Column(Integer, ForeignKey(Car_model.car_model_id),
                          nullable=False)
    mileage = Column(Integer)

    # Many-to-one link to the make/model row referenced by car_model_id.
    car_model = relationship(Car_model)

    def __repr__(self):
        """Return a one-line, human-readable description of the car."""
        fields = [
            "Car: ID={}".format(self.car_id),
            "{} {}".format(self.car_model.make, self.car_model.model),
            "Registration plate: '{}'".format(self.registration_number),
            "Number of owners: {}".format(self.number_of_owners),
            "Manufacture year: {}".format(self.manufacture_year),
            "Number of doors: {}".format(self.number_of_doors),
        ]
        # Mileage is an optional column; only show it when present.
        if self.mileage is not None:
            fields.append("Mileage: {}".format(self.mileage))
        return ", ".join(fields)
# Query: fetch the five cars with the lowest IDs and print each one.
session = Session()
query = session.query(Car).order_by(Car.car_id).limit(5)
for car in query.all():
    print(car)
# Update: change the first car's registration plate, re-read it through the
# same query to show the change, then commit.
car = query.first()
car.registration_number = 'BOND007'
print(query.first())
session.commit()
session.close()
# Insert: create a new car model row and commit it.
new_car_model = Car_model(make="Jaguar", model="XE")
session = Session()
session.add(new_car_model)
session.commit()
print("Created {} {}".format(new_car_model.make, new_car_model.model))
session.close()
# Delete: look the same row back up by make/model and remove it.
session = Session()
old_car_model = session.query(Car_model).filter(
    and_(Car_model.make == "Jaguar", Car_model.model == "XE")).one()
session.delete(old_car_model)
session.commit()
# NOTE(review): this relies on new_car_model's attributes still being loaded
# on the now-detached instance; capture make/model into locals before closing
# the earlier session if this ever raises DetachedInstanceError.
print("Removed {} {}".format(new_car_model.make, new_car_model.model))
session.close()
| 29.939024 | 79 | 0.672912 |
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
engine = create_engine(
"postgresql+pg8000://car_portal_app@localhost/car_portal",
echo=False)
Base = declarative_base()
Session = sessionmaker(bind=engine)
class Car_model(Base):
__tablename__ = "car_model"
__table_args__ = {'schema': 'car_portal_app'}
car_model_id = Column(Integer, primary_key=True)
make = Column(String)
model = Column(String)
class Car(Base):
__tablename__ = "car"
__table_args__ = {'schema': 'car_portal_app'}
car_id = Column(Integer, primary_key=True)
number_of_owners = Column(Integer, nullable=False)
registration_number = Column(String, nullable=False)
manufacture_year = Column(Integer, nullable=False)
number_of_doors = Column(Integer, nullable=False)
car_model_id = Column(Integer, ForeignKey(Car_model.car_model_id),
nullable=False)
mileage = Column(Integer)
car_model = relationship(Car_model)
def __repr__(self):
car_text = ("Car: ID={}, {} {}, Registration plate: '{}', "
"Number of owners: {}, Manufacture year: {}, "
"Number of doors: {}".format(
self.car_id, self.car_model.make, self.car_model.model,
self.registration_number,
self.number_of_owners, self.manufacture_year,
self.number_of_doors))
if self.mileage is not None:
car_text = car_text + ", Mileage: {}".format(self.mileage)
return car_text
session = Session()
query = session.query(Car).order_by(Car.car_id).limit(5)
for car in query.all():
print(car)
car = query.first()
car.registration_number = 'BOND007'
print(query.first())
session.commit()
session.close()
new_car_model = Car_model(make="Jaguar", model="XE")
session = Session()
session.add(new_car_model)
session.commit()
print("Created {} {}".format(new_car_model.make, new_car_model.model))
session.close()
session = Session()
old_car_model = session.query(Car_model).filter(
and_(Car_model.make == "Jaguar", Car_model.model == "XE")).one()
session.delete(old_car_model)
session.commit()
print("Removed {} {}".format(new_car_model.make, new_car_model.model))
session.close()
| true | true |
1c39a6a43a52b34276fae2d57bc088da4f6d8c5e | 698 | py | Python | wangzhansen/20180409/20180409h4.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | wangzhansen/20180409/20180409h4.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | wangzhansen/20180409/20180409h4.py | python20180319howmework/homework | c826d7aa4c52f8d22f739feb134d20f0b2c217cd | [
"Apache-2.0"
] | null | null | null | '''
4.现有两个进程,1号进程向文件写入10个"hello" 2号进程向文件写入10个"world",两个进程并发执行
如何使得文件中的内容一定是"hellohellohellohellohello.....worldworldworld......."
'''
from multiprocessing import Process, Pool, Lock
import os
def newprocess(flname, mylock, word="hello", count=10):
    """Write ``word`` repeated ``count`` times at the start of file *flname*.

    The whole open/seek/write sequence is guarded by *mylock* so concurrent
    processes cannot interleave their writes.  The defaults reproduce the
    original behaviour (ten copies of "hello"); a second process can pass
    word="world" to contribute its own text.

    Args:
        flname: Path to an existing file; it is opened in "r+" mode, so the
            caller must create it first.
        mylock: A multiprocessing/threading lock usable as a context manager.
        word: Text to repeat (default "hello").
        count: Number of repetitions (default 10).
    """
    with mylock:
        # "r+" keeps the rest of the file intact; only the first
        # len(word) * count characters are overwritten.
        with open(flname, "r+") as f:
            f.seek(0, 0)
            f.write(word * count)
if __name__ == '__main__':
    # Seed the target file so newprocess can reopen it in "r+" mode.
    f = open("./yourprocess.txt", "w")
    f.write(",")
    f.close()
    # One shared lock serializes the two workers' writes.
    mylock = Lock()
    # NOTE(review): both workers run the same newprocess target, so both
    # write "hello"; nothing ever writes "world" as the exercise statement
    # above requires.  The second process needs a different payload.
    p = Process (target= newprocess, args= ("./yourprocess.txt", mylock))
    p.start()
    q = Process (target= newprocess, args= ("./yourprocess.txt", mylock))
q.start() | 27.92 | 73 | 0.611748 | from multiprocessing import Process, Pool, Lock
import os
def newprocess(flname, mylock):
with mylock:
f = open(flname,"r+")
fistline = f.readline()
fistline = str("hello" * 10)
f.seek(0,0)
f.write(fistline)
f.close()
if __name__ == '__main__':
f = open("./yourprocess.txt", "w")
f.write(",")
f.close()
mylock = Lock()
p = Process (target= newprocess, args= ("./yourprocess.txt", mylock))
p.start()
q = Process (target= newprocess, args= ("./yourprocess.txt", mylock))
q.start() | true | true |
1c39a74be9aad9519c7a6d386c62be44d4926b3c | 848 | py | Python | nessus/file.py | tharvik/nessus | 4551c319ac6cb3026ddb096a0f6f71f060a578ab | [
"CC0-1.0"
] | null | null | null | nessus/file.py | tharvik/nessus | 4551c319ac6cb3026ddb096a0f6f71f060a578ab | [
"CC0-1.0"
] | null | null | null | nessus/file.py | tharvik/nessus | 4551c319ac6cb3026ddb096a0f6f71f060a578ab | [
"CC0-1.0"
] | null | null | null | from uuid import uuid4
from nessus.base import LibNessusBase
class NessusFile:
    """A local file to be uploaded, identified by its filesystem path."""
    def __init__(self, path: str) -> None:
        # Path on the local filesystem; read in binary mode at upload time.
        self.path = path
class NessusRemoteFile:
    """A file stored on the Nessus server, identified by its server-side name."""
    def __init__(self, name: str) -> None:
        # Filename reported by the Nessus upload endpoint ('fileuploaded').
        self.name = name
class LibNessusFile(LibNessusBase):
    """File-related operations of the Nessus REST API."""

    def upload(self, nessus_file: NessusFile) -> NessusRemoteFile:
        """
        Upload a local file to the Nessus server.

        ~lies: the data field name 'Filedata' is not documented in Nessus.
        A random UUID serves as the client-side filename in the multipart
        body; the server responds with the name it stored the file under.

        :param nessus_file: local file to upload
        :return: handle to the file as stored on the Nessus server
        """
        temp_name = str(uuid4())
        with open(nessus_file.path, 'rb') as handle:
            response = self._post(path='file/upload',
                                  files={'Filedata': (temp_name, handle)})
        return NessusRemoteFile(response.json()['fileuploaded'])
| 26.5 | 73 | 0.615566 | from uuid import uuid4
from nessus.base import LibNessusBase
class NessusFile:
def __init__(self, path: str) -> None:
self.path = path
class NessusRemoteFile:
def __init__(self, name: str) -> None:
self.name = name
class LibNessusFile(LibNessusBase):
def upload(self, nessus_file: NessusFile) -> NessusRemoteFile:
with open(nessus_file.path, 'rb') as io:
filename = str(uuid4())
files = {'Filedata': (filename, io)}
ans = self._post(path='file/upload', files=files)
filename = ans.json()['fileuploaded']
return NessusRemoteFile(filename)
| true | true |
1c39a7d4ab1265740970543e029e1ae8f390e384 | 3,914 | py | Python | Lib/site-packages/pymongo/settings.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 32 | 2017-08-29T08:57:16.000Z | 2021-04-21T08:53:04.000Z | Lib/site-packages/pymongo/settings.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 108 | 2017-09-04T19:59:28.000Z | 2022-03-31T08:12:07.000Z | Lib/site-packages/pymongo/settings.py | inging44/python3 | fcd8d9d2ee54b46b757ecf34f284b4e60a43097a | [
"bzip2-1.0.6"
] | 14 | 2017-07-06T09:35:30.000Z | 2021-06-03T19:41:48.000Z | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Represent MongoClient's configuration."""
import threading
from bson.objectid import ObjectId
from pymongo import common, monitor, pool
from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT
from pymongo.errors import ConfigurationError
from pymongo.topology_description import TOPOLOGY_TYPE
from pymongo.pool import PoolOptions
from pymongo.server_description import ServerDescription
class TopologySettings(object):
    """Represent MongoClient's configuration.

    A read-only bundle of the options that drive topology discovery and
    monitoring: seed addresses, optional replica set name, the pool /
    monitor / condition implementation classes, and timing parameters.
    """
    def __init__(self,
                 seeds=None,
                 replica_set_name=None,
                 pool_class=None,
                 pool_options=None,
                 monitor_class=None,
                 condition_class=None,
                 local_threshold_ms=LOCAL_THRESHOLD_MS,
                 server_selection_timeout=SERVER_SELECTION_TIMEOUT,
                 heartbeat_frequency=common.HEARTBEAT_FREQUENCY):
        """Represent MongoClient's configuration.

        Take a list of (host, port) pairs and optional replica set name.

        Raises ConfigurationError if heartbeat_frequency (in seconds) is
        below the minimum supported interval.
        """
        if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL:
            # Bug fix: parenthesize the product.  "%" binds more tightly
            # than "*", so the original expression formatted the message
            # with the interval in *seconds* and then repeated the whole
            # resulting string 1000 times.
            raise ConfigurationError(
                "heartbeatFrequencyMS cannot be less than %d" %
                (common.MIN_HEARTBEAT_INTERVAL * 1000,))
        self._seeds = seeds or [('localhost', 27017)]
        self._replica_set_name = replica_set_name
        # Fall back to the library's default implementation classes when the
        # caller does not supply alternates.
        self._pool_class = pool_class or pool.Pool
        self._pool_options = pool_options or PoolOptions()
        self._monitor_class = monitor_class or monitor.Monitor
        self._condition_class = condition_class or threading.Condition
        self._local_threshold_ms = local_threshold_ms
        self._server_selection_timeout = server_selection_timeout
        self._heartbeat_frequency = heartbeat_frequency
        # A single seed with no replica set name means a direct connection.
        self._direct = (len(self._seeds) == 1 and not replica_set_name)
        self._topology_id = ObjectId()
    @property
    def seeds(self):
        """List of server addresses."""
        return self._seeds
    @property
    def replica_set_name(self):
        """Replica set name passed by the caller, or None."""
        return self._replica_set_name
    @property
    def pool_class(self):
        """Connection pool class used for each server."""
        return self._pool_class
    @property
    def pool_options(self):
        """PoolOptions instance shared by all pools."""
        return self._pool_options
    @property
    def monitor_class(self):
        """Class used to monitor each server."""
        return self._monitor_class
    @property
    def condition_class(self):
        """Condition-variable class used when waiting for topology changes."""
        return self._condition_class
    @property
    def local_threshold_ms(self):
        """Local threshold in milliseconds."""
        return self._local_threshold_ms
    @property
    def server_selection_timeout(self):
        """Server selection timeout."""
        return self._server_selection_timeout
    @property
    def heartbeat_frequency(self):
        """Interval between server checks, in seconds."""
        return self._heartbeat_frequency
    @property
    def direct(self):
        """Connect directly to a single server, or use a set of servers?

        True if there is one seed and no replica_set_name.
        """
        return self._direct
    def get_topology_type(self):
        """Initial TOPOLOGY_TYPE implied by this configuration."""
        if self.direct:
            return TOPOLOGY_TYPE.Single
        elif self.replica_set_name is not None:
            return TOPOLOGY_TYPE.ReplicaSetNoPrimary
        else:
            return TOPOLOGY_TYPE.Unknown
    def get_server_descriptions(self):
        """Initial dict of (address, ServerDescription) for all seeds."""
        # Dict comprehension instead of dict([...]) -- same result, clearer.
        return {address: ServerDescription(address)
                for address in self.seeds}
| 33.169492 | 73 | 0.686765 |
import threading
from bson.objectid import ObjectId
from pymongo import common, monitor, pool
from pymongo.common import LOCAL_THRESHOLD_MS, SERVER_SELECTION_TIMEOUT
from pymongo.errors import ConfigurationError
from pymongo.topology_description import TOPOLOGY_TYPE
from pymongo.pool import PoolOptions
from pymongo.server_description import ServerDescription
class TopologySettings(object):
def __init__(self,
seeds=None,
replica_set_name=None,
pool_class=None,
pool_options=None,
monitor_class=None,
condition_class=None,
local_threshold_ms=LOCAL_THRESHOLD_MS,
server_selection_timeout=SERVER_SELECTION_TIMEOUT,
heartbeat_frequency=common.HEARTBEAT_FREQUENCY):
if heartbeat_frequency < common.MIN_HEARTBEAT_INTERVAL:
raise ConfigurationError(
"heartbeatFrequencyMS cannot be less than %d" %
common.MIN_HEARTBEAT_INTERVAL * 1000)
self._seeds = seeds or [('localhost', 27017)]
self._replica_set_name = replica_set_name
self._pool_class = pool_class or pool.Pool
self._pool_options = pool_options or PoolOptions()
self._monitor_class = monitor_class or monitor.Monitor
self._condition_class = condition_class or threading.Condition
self._local_threshold_ms = local_threshold_ms
self._server_selection_timeout = server_selection_timeout
self._heartbeat_frequency = heartbeat_frequency
self._direct = (len(self._seeds) == 1 and not replica_set_name)
self._topology_id = ObjectId()
@property
def seeds(self):
return self._seeds
@property
def replica_set_name(self):
return self._replica_set_name
@property
def pool_class(self):
return self._pool_class
@property
def pool_options(self):
return self._pool_options
@property
def monitor_class(self):
return self._monitor_class
@property
def condition_class(self):
return self._condition_class
@property
def local_threshold_ms(self):
return self._local_threshold_ms
@property
def server_selection_timeout(self):
return self._server_selection_timeout
@property
def heartbeat_frequency(self):
return self._heartbeat_frequency
@property
def direct(self):
return self._direct
def get_topology_type(self):
if self.direct:
return TOPOLOGY_TYPE.Single
elif self.replica_set_name is not None:
return TOPOLOGY_TYPE.ReplicaSetNoPrimary
else:
return TOPOLOGY_TYPE.Unknown
def get_server_descriptions(self):
return dict([
(address, ServerDescription(address))
for address in self.seeds])
| true | true |
1c39a8d92caa68d04b7ab0b59f966fe8d002a086 | 4,049 | py | Python | mmdet/core/bbox/samplers/base_sampler.py | arthur801031/3d-multi-resolution-rcnn | 8e5454a72f8daa174bf3eabfa5964152f04ab287 | [
"Apache-2.0"
] | 16 | 2021-03-02T07:41:01.000Z | 2022-03-14T08:55:45.000Z | mmdet/core/bbox/samplers/base_sampler.py | arthur801031/3d-multi-resolution-rcnn | 8e5454a72f8daa174bf3eabfa5964152f04ab287 | [
"Apache-2.0"
] | 2 | 2022-01-06T20:54:13.000Z | 2022-02-24T03:50:51.000Z | mmdet/core/bbox/samplers/base_sampler.py | arthur801031/3d-multi-resolution-rcnn | 8e5454a72f8daa174bf3eabfa5964152f04ab287 | [
"Apache-2.0"
] | 2 | 2021-05-26T19:23:35.000Z | 2022-01-06T20:30:24.000Z | from abc import ABCMeta, abstractmethod
import torch
from .sampling_result import SamplingResult
class BaseSampler(metaclass=ABCMeta):
    """Base class for proposal samplers used during training.

    A sampler picks a fixed-size mix of positive and negative proposals from
    an assigner's output; subclasses provide the actual selection strategies
    via ``_sample_pos`` / ``_sample_neg``.
    """
    def __init__(self,
                 num,
                 pos_fraction,
                 neg_pos_ub=-1,
                 add_gt_as_proposals=True,
                 **kwargs):
        """
        Args:
            num (int): Total number of proposals to sample.
            pos_fraction (float): Desired fraction of positives in the sample.
            neg_pos_ub (int): Upper bound on the negative:positive ratio;
                a negative value disables the bound.
            add_gt_as_proposals (bool): Whether to prepend the ground-truth
                boxes to the candidate proposals.
        """
        self.num = num
        self.pos_fraction = pos_fraction
        self.neg_pos_ub = neg_pos_ub
        self.add_gt_as_proposals = add_gt_as_proposals
        # By default this object samples both sides itself; wrapper samplers
        # may replace these attributes with dedicated sub-samplers.
        self.pos_sampler = self
        self.neg_sampler = self
    @abstractmethod
    def _sample_pos(self, assign_result, num_expected, **kwargs):
        """Select up to ``num_expected`` positive sample indices."""
        pass
    @abstractmethod
    def _sample_neg(self, assign_result, num_expected, **kwargs):
        """Select up to ``num_expected`` negative sample indices."""
        pass
    def sample(self,
               assign_result,
               bboxes,
               gt_bboxes,
               gt_labels=None,
               **kwargs):
        """Sample positive and negative bboxes.

        This is a simple implementation of bbox sampling given candidates,
        assigning results and ground truth bboxes.

        Args:
            assign_result (:obj:`AssignResult`): Bbox assigning results.
            bboxes (Tensor): Boxes to be sampled from; only the first 4 (or
                6 -- presumably for 3-D boxes, TODO confirm) columns are kept.
            gt_bboxes (Tensor): Ground truth bboxes.
            gt_labels (Tensor, optional): Class labels of ground truth bboxes.

        Returns:
            :obj:`SamplingResult`: Sampling result.
        """
        # Single-image inputs are sometimes wrapped in one-element lists.
        if isinstance(gt_bboxes, list) and len(gt_bboxes) == 1:
            gt_bboxes = gt_bboxes[0]
        if isinstance(gt_labels, list) and len(gt_labels) == 1:
            gt_labels = gt_labels[0]
        # Keep only the coordinate columns (drops e.g. an appended score).
        if bboxes.shape[1] >= 6:
            bboxes = bboxes[:, :6]
        elif bboxes.shape[1] >= 4:
            bboxes = bboxes[:, :4]
        # gt_flags marks which rows of ``bboxes`` are ground-truth boxes.
        gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
        if self.add_gt_as_proposals:
            bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
            assign_result.add_gt_(gt_labels)
            gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
            gt_flags = torch.cat([gt_ones, gt_flags])
        num_expected_pos = int(self.num * self.pos_fraction)
        pos_inds = self.pos_sampler._sample_pos(
            assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
        # We found that sampled indices have duplicated items occasionally.
        # (may be a bug of PyTorch)
        pos_inds = pos_inds.unique()
        num_sampled_pos = pos_inds.numel()
        # Fill the remaining quota with negatives, optionally capped at
        # ``neg_pos_ub`` times the number of sampled positives.
        num_expected_neg = self.num - num_sampled_pos
        if self.neg_pos_ub >= 0:
            _pos = max(1, num_sampled_pos)
            neg_upper_bound = int(self.neg_pos_ub * _pos)
            if num_expected_neg > neg_upper_bound:
                num_expected_neg = neg_upper_bound
        neg_inds = self.neg_sampler._sample_neg(
            assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
        # NOTE(review): a score-based hard-negative-mining variant (picking
        # half of the negatives by highest proposal score) used to live here
        # but is disabled; consult version control history to revive it.
        neg_inds = neg_inds.unique()
        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
                              assign_result, gt_flags)
| 38.561905 | 136 | 0.602371 | from abc import ABCMeta, abstractmethod
import torch
from .sampling_result import SamplingResult
class BaseSampler(metaclass=ABCMeta):
def __init__(self,
num,
pos_fraction,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
self.num = num
self.pos_fraction = pos_fraction
self.neg_pos_ub = neg_pos_ub
self.add_gt_as_proposals = add_gt_as_proposals
self.pos_sampler = self
self.neg_sampler = self
@abstractmethod
def _sample_pos(self, assign_result, num_expected, **kwargs):
pass
@abstractmethod
def _sample_neg(self, assign_result, num_expected, **kwargs):
pass
def sample(self,
assign_result,
bboxes,
gt_bboxes,
gt_labels=None,
**kwargs):
if isinstance(gt_bboxes, list) and len(gt_bboxes) == 1:
gt_bboxes = gt_bboxes[0]
if isinstance(gt_labels, list) and len(gt_labels) == 1:
gt_labels = gt_labels[0]
if bboxes.shape[1] >= 6:
bboxes[:, :6]
elif bboxes.shape[1] >= 4:
bboxes = bboxes[:, :4]
gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
if self.add_gt_as_proposals:
bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
pos_inds = pos_inds.unique()
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds = self.neg_sampler._sample_neg(
assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
| true | true |
1c39a9c1963840994365920d42980436d8d9a731 | 1,350 | py | Python | coverage/disposition.py | timofurrer/coveragepy | 72e9761ee79eb2f5b61b21a5427e07fff6acd400 | [
"Apache-2.0"
] | 2,254 | 2015-01-05T01:28:03.000Z | 2022-03-29T10:37:10.000Z | coverage/disposition.py | timofurrer/coveragepy | 72e9761ee79eb2f5b61b21a5427e07fff6acd400 | [
"Apache-2.0"
] | 707 | 2015-02-07T01:32:02.000Z | 2022-03-31T18:00:14.000Z | coverage/disposition.py | sitedata/coveragepy | e4f0f9ee71a1ade66b51ec53d0061f462e3838cb | [
"Apache-2.0"
] | 439 | 2015-01-16T15:06:08.000Z | 2022-03-30T06:19:12.000Z | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Simple value objects for tracking what to do with files."""
class FileDisposition:
    """A simple value type for recording what to do with a file.

    Attributes are attached dynamically by ``disposition_init`` below; the
    class body itself is intentionally empty.
    """
    pass
# FileDisposition "methods": FileDisposition is a pure value object, so it can
# be implemented in either C or Python. Acting on them is done with these
# functions.
def disposition_init(cls, original_filename):
    """Construct and initialize a new FileDisposition object."""
    # Until a plugin rewrites them, both filename fields hold the original
    # name, tracing is off, and no file tracer is attached.
    initial_state = {
        "original_filename": original_filename,
        "canonical_filename": original_filename,
        "source_filename": None,
        "trace": False,
        "reason": "",
        "file_tracer": None,
        "has_dynamic_filename": False,
    }
    disp = cls()
    for attr, value in initial_state.items():
        setattr(disp, attr, value)
    return disp
def disposition_debug_msg(disp):
    """Make a nice debug message of what the FileDisposition is doing."""
    if disp.trace:
        msg = f"Tracing {disp.original_filename!r}"
        # Mention the source filename only when a plugin remapped it.
        if disp.original_filename != disp.source_filename:
            msg += f" as {disp.source_filename!r}"
        if disp.file_tracer:
            # f-string instead of "%r" formatting, for consistency with the
            # rest of this function; the output is identical.
            msg += f": will be traced by {disp.file_tracer!r}"
    else:
        msg = f"Not tracing {disp.original_filename!r}: {disp.reason}"
    return msg
| 33.75 | 79 | 0.696296 |
class FileDisposition:
pass
def disposition_init(cls, original_filename):
disp = cls()
disp.original_filename = original_filename
disp.canonical_filename = original_filename
disp.source_filename = None
disp.trace = False
disp.reason = ""
disp.file_tracer = None
disp.has_dynamic_filename = False
return disp
def disposition_debug_msg(disp):
if disp.trace:
msg = f"Tracing {disp.original_filename!r}"
if disp.original_filename != disp.source_filename:
msg += f" as {disp.source_filename!r}"
if disp.file_tracer:
msg += ": will be traced by %r" % disp.file_tracer
else:
msg = f"Not tracing {disp.original_filename!r}: {disp.reason}"
return msg
| true | true |
1c39aa0de256edb03c668ae411a19be0cd31aeec | 237,033 | py | Python | mypy/semanal.py | linw1995/mypy | 2c909125e6fdc7f8855ead6b0091fc2a878d4462 | [
"PSF-2.0"
] | null | null | null | mypy/semanal.py | linw1995/mypy | 2c909125e6fdc7f8855ead6b0091fc2a878d4462 | [
"PSF-2.0"
] | null | null | null | mypy/semanal.py | linw1995/mypy | 2c909125e6fdc7f8855ead6b0091fc2a878d4462 | [
"PSF-2.0"
] | 1 | 2022-01-31T13:24:43.000Z | 2022-01-31T13:24:43.000Z | """The semantic analyzer.
Bind names to definitions and do various other simple consistency
checks. Populate symbol tables. The semantic analyzer also detects
special forms which reuse generic syntax such as NamedTuple and
cast(). Multiple analysis iterations may be needed to analyze forward
references and import cycles. Each iteration "fills in" additional
bindings and references until everything has been bound.
For example, consider this program:
x = 1
y = x
Here semantic analysis would detect that the assignment 'x = 1'
defines a new variable, the type of which is to be inferred (in a
later pass; type inference or type checking is not part of semantic
analysis). Also, it would bind both references to 'x' to the same
module-level variable (Var) node. The second assignment would also
be analyzed, and the type of 'y' marked as being inferred.
Semantic analysis of types is implemented in typeanal.py.
See semanal_main.py for the top-level logic.
Some important properties:
* After semantic analysis is complete, no PlaceholderNode and
PlaceholderType instances should remain. During semantic analysis,
if we encounter one of these, the current target should be deferred.
* A TypeInfo is only created once we know certain basic information about
a type, such as the MRO, existence of a Tuple base class (e.g., for named
tuples), and whether we have a TypedDict. We use a temporary
PlaceholderNode node in the symbol table if some such information is
missing.
* For assignments, we only add a non-placeholder symbol table entry once
we know the sort of thing being defined (variable, NamedTuple, type alias,
etc.).
* Every part of the analysis step must support multiple iterations over
the same AST nodes, and each iteration must be able to fill in arbitrary
things that were missing or incomplete in previous iterations.
* Changes performed by the analysis need to be reversible, since mypy
daemon strips and reuses existing ASTs (to improve performance and/or
reduce memory use).
"""
from contextlib import contextmanager
from typing import (
List, Dict, Set, Tuple, cast, TypeVar, Union, Optional, Callable, Iterator, Iterable
)
from typing_extensions import Final
from mypy.nodes import (
MypyFile, TypeInfo, Node, AssignmentStmt, FuncDef, OverloadedFuncDef,
ClassDef, Var, GDEF, FuncItem, Import, Expression, Lvalue,
ImportFrom, ImportAll, Block, LDEF, NameExpr, MemberExpr,
IndexExpr, TupleExpr, ListExpr, ExpressionStmt, ReturnStmt,
RaiseStmt, AssertStmt, OperatorAssignmentStmt, WhileStmt,
ForStmt, BreakStmt, ContinueStmt, IfStmt, TryStmt, WithStmt, DelStmt,
GlobalDecl, SuperExpr, DictExpr, CallExpr, RefExpr, OpExpr, UnaryExpr,
SliceExpr, CastExpr, RevealExpr, TypeApplication, Context, SymbolTable,
SymbolTableNode, ListComprehension, GeneratorExpr,
LambdaExpr, MDEF, Decorator, SetExpr, TypeVarExpr,
StrExpr, BytesExpr, PrintStmt, ConditionalExpr, PromoteExpr,
ComparisonExpr, StarExpr, ArgKind, ARG_POS, ARG_NAMED, type_aliases,
YieldFromExpr, NamedTupleExpr, NonlocalDecl, SymbolNode,
SetComprehension, DictionaryComprehension, TypeAlias, TypeAliasExpr,
YieldExpr, ExecStmt, BackquoteExpr, ImportBase, AwaitExpr,
IntExpr, FloatExpr, UnicodeExpr, TempNode, OverloadPart,
PlaceholderNode, COVARIANT, CONTRAVARIANT, INVARIANT,
get_nongen_builtins, get_member_expr_fullname, REVEAL_TYPE,
REVEAL_LOCALS, is_final_node, TypedDictExpr, type_aliases_source_versions,
EnumCallExpr, RUNTIME_PROTOCOL_DECOS, FakeExpression, Statement, AssignmentExpr,
ParamSpecExpr, EllipsisExpr
)
from mypy.tvar_scope import TypeVarLikeScope
from mypy.typevars import fill_typevars
from mypy.visitor import NodeVisitor
from mypy.errors import Errors, report_internal_error
from mypy.messages import (
best_matches, MessageBuilder, pretty_seq, SUGGESTED_TEST_FIXTURES, TYPES_FOR_UNIMPORTED_HINTS
)
from mypy.errorcodes import ErrorCode
from mypy import message_registry, errorcodes as codes
from mypy.types import (
FunctionLike, UnboundType, TypeVarDef, TupleType, UnionType, StarType,
CallableType, Overloaded, Instance, Type, AnyType, LiteralType, LiteralValue,
TypeTranslator, TypeOfAny, TypeType, NoneType, PlaceholderType, TPDICT_NAMES, ProperType,
get_proper_type, get_proper_types, TypeAliasType
)
from mypy.typeops import function_type
from mypy.type_visitor import TypeQuery
from mypy.nodes import implicit_module_attrs
from mypy.typeanal import (
TypeAnalyser, analyze_type_alias, no_subscript_builtin_alias,
TypeVarLikeQuery, TypeVarLikeList, remove_dups, has_any_from_unimported_type,
check_for_explicit_any, type_constructors, fix_instance_types
)
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.options import Options
from mypy.plugin import (
Plugin, ClassDefContext, SemanticAnalyzerPluginInterface,
DynamicClassDefContext
)
from mypy.util import (
correct_relative_import, unmangle, module_prefix, is_typeshed_file, unnamed_function,
)
from mypy.scope import Scope
from mypy.semanal_shared import (
SemanticAnalyzerInterface, set_callable_name, calculate_tuple_fallback, PRIORITY_FALLBACKS
)
from mypy.semanal_namedtuple import NamedTupleAnalyzer
from mypy.semanal_typeddict import TypedDictAnalyzer
from mypy.semanal_enum import EnumCallAnalyzer
from mypy.semanal_newtype import NewTypeAnalyzer
from mypy.reachability import (
infer_reachability_of_if_statement, infer_condition_value, ALWAYS_FALSE, ALWAYS_TRUE,
MYPY_TRUE, MYPY_FALSE
)
from mypy.mro import calculate_mro, MroError
# Generic type variable used by helpers in this module.
T = TypeVar('T')
# Maps the fully qualified name of each __future__ feature to its short
# name, for recording a module's "from __future__ import ..." flags.
FUTURE_IMPORTS: Final = {
    '__future__.nested_scopes': 'nested_scopes',
    '__future__.generators': 'generators',
    '__future__.division': 'division',
    '__future__.absolute_import': 'absolute_import',
    '__future__.with_statement': 'with_statement',
    '__future__.print_function': 'print_function',
    '__future__.unicode_literals': 'unicode_literals',
    '__future__.barry_as_FLUFL': 'barry_as_FLUFL',
    '__future__.generator_stop': 'generator_stop',
    '__future__.annotations': 'annotations',
}
# Special cased built-in classes that are needed for basic functionality and need to be
# available very early on.
CORE_BUILTIN_CLASSES: Final = ["object", "bool", "function"]
# Used for tracking incomplete references (alias for an opaque int marker).
Tag = int
class SemanticAnalyzer(NodeVisitor[None],
SemanticAnalyzerInterface,
SemanticAnalyzerPluginInterface):
"""Semantically analyze parsed mypy files.
The analyzer binds names and does various consistency checks for an
AST. Note that type checking is performed as a separate pass.
"""
__deletable__ = ['patches', 'options', 'cur_mod_node']
# Module name space
modules: Dict[str, MypyFile]
# Global name space for current module
globals: SymbolTable
# Names declared using "global" (separate set for each scope)
global_decls: List[Set[str]]
# Names declared using "nonlocal" (separate set for each scope)
nonlocal_decls: List[Set[str]]
# Local names of function scopes; None for non-function scopes.
locals: List[Optional[SymbolTable]]
# Whether each scope is a comprehension scope.
is_comprehension_stack: List[bool]
# Nested block depths of scopes
block_depth: List[int]
# TypeInfo of directly enclosing class (or None)
type: Optional[TypeInfo] = None
# Stack of outer classes (the second tuple item contains tvars).
type_stack: List[Optional[TypeInfo]]
# Type variables bound by the current scope, be it class or function
tvar_scope: TypeVarLikeScope
# Per-module options
options: Options
# Stack of functions being analyzed
function_stack: List[FuncItem]
# Set to True if semantic analysis defines a name, or replaces a
# placeholder definition. If some iteration makes no progress,
# there can be at most one additional final iteration (see below).
progress = False
deferred = False # Set to true if another analysis pass is needed
incomplete = False # Set to true if current module namespace is missing things
# Is this the final iteration of semantic analysis (where we report
# unbound names due to cyclic definitions and should not defer)?
_final_iteration = False
# These names couldn't be added to the symbol table due to incomplete deps.
# Note that missing names are per module, _not_ per namespace. This means that e.g.
# a missing name at global scope will block adding same name at a class scope.
# This should not affect correctness and is purely a performance issue,
# since it can cause unnecessary deferrals. These are represented as
# PlaceholderNodes in the symbol table. We use this to ensure that the first
# definition takes precedence even if it's incomplete.
#
# Note that a star import adds a special name '*' to the set, this blocks
# adding _any_ names in the current file.
missing_names: List[Set[str]]
# Callbacks that will be called after semantic analysis to tweak things.
patches: List[Tuple[int, Callable[[], None]]]
loop_depth = 0 # Depth of breakable loops
cur_mod_id = '' # Current module id (or None) (phase 2)
_is_stub_file = False # Are we analyzing a stub file?
_is_typeshed_stub_file = False # Are we analyzing a typeshed stub file?
imports: Set[str] # Imported modules (during phase 2 analysis)
# Note: some imports (and therefore dependencies) might
# not be found in phase 1, for example due to * imports.
errors: Errors # Keeps track of generated errors
plugin: Plugin # Mypy plugin for special casing of library features
statement: Optional[Statement] = None # Statement/definition being analyzed
future_import_flags: Set[str]
# Mapping from 'async def' function definitions to their return type wrapped as a
# 'Coroutine[Any, Any, T]'. Used to keep track of whether a function definition's
# return type has already been wrapped, by checking if the function definition's
# type is stored in this mapping and that it still matches.
wrapped_coro_return_types: Dict[FuncDef, Type] = {}
    def __init__(self,
                 modules: Dict[str, MypyFile],
                 missing_modules: Set[str],
                 incomplete_namespaces: Set[str],
                 errors: Errors,
                 plugin: Plugin) -> None:
        """Construct semantic analyzer.

        We reuse the same semantic analyzer instance across multiple modules.

        Args:
            modules: Global modules dictionary
            missing_modules: Modules that could not be imported encountered so far
            incomplete_namespaces: Namespaces that are being populated during semantic analysis
                (can contain modules and classes within the current SCC; mutated by the caller)
            errors: Report analysis errors using this instance
            plugin: Mypy plugin for special casing of library features
        """
        self.locals = [None]
        self.is_comprehension_stack = [False]
        # Saved namespaces from previous iteration. Every top-level function/method body is
        # analyzed in several iterations until all names are resolved. We need to save
        # the local namespaces for the top level function and all nested functions between
        # these iterations. See also semanal_main.process_top_level_function().
        self.saved_locals: Dict[
            Union[FuncItem, GeneratorExpr, DictionaryComprehension], SymbolTable
        ] = {}
        self.imports = set()
        # Currently active class (None at module or function scope); see file_context().
        self.type = None
        self.type_stack = []
        # Are the namespaces of classes being processed complete?
        self.incomplete_type_stack: List[bool] = []
        self.tvar_scope = TypeVarLikeScope()
        self.function_stack = []
        self.block_depth = [0]
        self.loop_depth = 0
        self.errors = errors
        self.modules = modules
        self.msg = MessageBuilder(errors, modules)
        self.missing_modules = missing_modules
        self.missing_names = [set()]
        # These namespaces are still in process of being populated. If we encounter a
        # missing name in these namespaces, we need to defer the current analysis target,
        # since it's possible that the name will be there once the namespace is complete.
        self.incomplete_namespaces = incomplete_namespaces
        self.all_exports: List[str] = []
        # Map from module id to list of explicitly exported names (i.e. names in __all__).
        self.export_map: Dict[str, List[str]] = {}
        self.plugin = plugin
        # If True, process function definitions. If False, don't. This is used
        # for processing module top levels in fine-grained incremental mode.
        self.recurse_into_functions = True
        self.scope = Scope()
        # Trace line numbers for every file where deferral happened during analysis of
        # current SCC or top-level function.
        self.deferral_debug_context: List[Tuple[str, int]] = []
        self.future_import_flags: Set[str] = set()
    # mypyc doesn't properly handle implementing an abstractproperty
    # with a regular attribute so we make them properties
    @property
    def is_stub_file(self) -> bool:
        """Are we analyzing a stub (.pyi) file?"""
        return self._is_stub_file
    @property
    def is_typeshed_stub_file(self) -> bool:
        """Are we analyzing a stub file that lives in typeshed?"""
        return self._is_typeshed_stub_file
    @property
    def final_iteration(self) -> bool:
        """Is this the final analysis iteration (deferral no longer allowed)?"""
        return self._final_iteration
#
# Preparing module (performed before semantic analysis)
#
def prepare_file(self, file_node: MypyFile) -> None:
"""Prepare a freshly parsed file for semantic analysis."""
if 'builtins' in self.modules:
file_node.names['__builtins__'] = SymbolTableNode(GDEF,
self.modules['builtins'])
if file_node.fullname == 'builtins':
self.prepare_builtins_namespace(file_node)
if file_node.fullname == 'typing':
self.prepare_typing_namespace(file_node)
    def prepare_typing_namespace(self, file_node: MypyFile) -> None:
        """Remove dummy alias definitions such as List = TypeAlias(object) from typing.

        They will be replaced with real aliases when corresponding targets are ready.
        """
        # This is all pretty unfortunate. typeshed now has a
        # sys.version_info check for OrderedDict, and we shouldn't
        # take it out, because it is correct and a typechecker should
        # use that as a source of truth. But instead we rummage
        # through IfStmts to remove the info first. (I tried to
        # remove this whole machinery and ran into issues with the
        # builtins/typing import cycle.)
        def helper(defs: List[Statement]) -> None:
            # Iterate over a copy since we remove entries from 'defs' below.
            for stmt in defs.copy():
                if isinstance(stmt, IfStmt):
                    # Recurse into version-check branches (see note above).
                    for body in stmt.body:
                        helper(body.body)
                    if stmt.else_body:
                        helper(stmt.else_body.body)
                if (isinstance(stmt, AssignmentStmt) and len(stmt.lvalues) == 1 and
                        isinstance(stmt.lvalues[0], NameExpr)):
                    # Assignment to a simple name, remove it if it is a dummy alias.
                    if 'typing.' + stmt.lvalues[0].name in type_aliases:
                        defs.remove(stmt)
        helper(file_node.defs)
    def prepare_builtins_namespace(self, file_node: MypyFile) -> None:
        """Add certain special-cased definitions to the builtins module.

        Some definitions are too special or fundamental to be processed
        normally from the AST.
        """
        names = file_node.names
        # Add empty definition for core built-in classes, since they are required for basic
        # operation. These will be completed later on.
        for name in CORE_BUILTIN_CLASSES:
            cdef = ClassDef(name, Block([]))  # Dummy ClassDef, will be replaced later
            info = TypeInfo(SymbolTable(), cdef, 'builtins')
            info._fullname = 'builtins.%s' % name
            names[name] = SymbolTableNode(GDEF, info)
        bool_info = names['bool'].node
        assert isinstance(bool_info, TypeInfo)
        bool_type = Instance(bool_info, [])
        # Special module-level names whose types cannot come from the AST.
        special_var_types: List[Tuple[str, Type]] = [
            ('None', NoneType()),
            # reveal_type is a mypy-only function that gives an error with
            # the type of its arg.
            ('reveal_type', AnyType(TypeOfAny.special_form)),
            # reveal_locals is a mypy-only function that gives an error with the types of
            # locals
            ('reveal_locals', AnyType(TypeOfAny.special_form)),
            ('True', bool_type),
            ('False', bool_type),
            ('__debug__', bool_type),
        ]
        for name, typ in special_var_types:
            v = Var(name, typ)
            v._fullname = 'builtins.%s' % name
            file_node.names[name] = SymbolTableNode(GDEF, v)
#
# Analyzing a target
#
    def refresh_partial(self,
                        node: Union[MypyFile, FuncDef, OverloadedFuncDef],
                        patches: List[Tuple[int, Callable[[], None]]],
                        final_iteration: bool,
                        file_node: MypyFile,
                        options: Options,
                        active_type: Optional[TypeInfo] = None) -> None:
        """Refresh a stale target in fine-grained incremental mode.

        Resets per-iteration state (deferred/incomplete flags, missing names)
        before analyzing either a whole module top level or a single function.
        """
        self.patches = patches
        self.deferred = False
        self.incomplete = False
        self._final_iteration = final_iteration
        self.missing_names[-1] = set()
        with self.file_context(file_node, options, active_type):
            if isinstance(node, MypyFile):
                self.refresh_top_level(node)
            else:
                self.recurse_into_functions = True
                self.accept(node)
        # Patches are owned by the caller once analysis of this target is done.
        del self.patches
    def refresh_top_level(self, file_node: MypyFile) -> None:
        """Reanalyze a stale module top-level in fine-grained incremental mode."""
        # Function bodies were added as separate targets; skip them here.
        self.recurse_into_functions = False
        self.add_implicit_module_attrs(file_node)
        for d in file_node.defs:
            self.accept(d)
        if file_node.fullname == 'typing':
            self.add_builtin_aliases(file_node)
        self.adjust_public_exports()
        # Record and reset the module's __all__-driven export list.
        self.export_map[self.cur_mod_id] = self.all_exports
        self.all_exports = []
    def add_implicit_module_attrs(self, file_node: MypyFile) -> None:
        """Manually add implicit definitions of module '__name__' etc."""
        for name, t in implicit_module_attrs.items():
            # unicode docstrings should be accepted in Python 2
            if name == '__doc__':
                if self.options.python_version >= (3, 0):
                    typ: Type = UnboundType("__builtins__.str")
                else:
                    typ = UnionType([UnboundType('__builtins__.str'),
                                     UnboundType('__builtins__.unicode')])
            else:
                assert t is not None, 'type should be specified for {}'.format(name)
                typ = UnboundType(t)
            existing = file_node.names.get(name)
            if existing is not None and not isinstance(existing.node, PlaceholderNode):
                # Already exists.
                continue
            an_type = self.anal_type(typ)
            if an_type:
                var = Var(name, an_type)
                var._fullname = self.qualified_name(name)
                var.is_ready = True
                self.add_symbol(name, var, dummy_context())
            else:
                # The type couldn't be analyzed yet; add a placeholder so the
                # name is retried on a later iteration.
                self.add_symbol(name,
                                PlaceholderNode(self.qualified_name(name), file_node, -1),
                                dummy_context())
    def add_builtin_aliases(self, tree: MypyFile) -> None:
        """Add builtin type aliases to typing module.

        For historical reasons, the aliases like `List = list` are not defined
        in typeshed stubs for typing module. Instead we need to manually add the
        corresponding nodes on the fly. We explicitly mark these aliases as normalized,
        so that a user can write `typing.List[int]`.
        """
        assert tree.fullname == 'typing'
        for alias, target_name in type_aliases.items():
            if type_aliases_source_versions[alias] > self.options.python_version:
                # This alias is not available on this Python version.
                continue
            name = alias.split('.')[-1]
            if name in tree.names and not isinstance(tree.names[name].node, PlaceholderNode):
                # A real (non-placeholder) definition already exists.
                continue
            tag = self.track_incomplete_refs()
            n = self.lookup_fully_qualified_or_none(target_name)
            if n:
                if isinstance(n.node, PlaceholderNode):
                    self.mark_incomplete(name, tree)
                else:
                    # Found built-in class target. Create alias.
                    target = self.named_type_or_none(target_name, [])
                    assert target is not None
                    # Transform List to List[Any], etc.
                    fix_instance_types(target, self.fail, self.note, self.options.python_version)
                    alias_node = TypeAlias(target, alias,
                                           line=-1, column=-1,  # there is no context
                                           no_args=True, normalized=True)
                    self.add_symbol(name, alias_node, tree)
            elif self.found_incomplete_ref(tag):
                # Built-in class target may not ready yet -- defer.
                self.mark_incomplete(name, tree)
            else:
                # Test fixtures may be missing some builtin classes, which is okay.
                # Kill the placeholder if there is one.
                if name in tree.names:
                    assert isinstance(tree.names[name].node, PlaceholderNode)
                    del tree.names[name]
def adjust_public_exports(self) -> None:
"""Adjust the module visibility of globals due to __all__."""
if '__all__' in self.globals:
for name, g in self.globals.items():
# Being included in __all__ explicitly exports and makes public.
if name in self.all_exports:
g.module_public = True
g.module_hidden = False
# But when __all__ is defined, and a symbol is not included in it,
# it cannot be public.
else:
g.module_public = False
    @contextmanager
    def file_context(self,
                     file_node: MypyFile,
                     options: Options,
                     active_type: Optional[TypeInfo] = None) -> Iterator[None]:
        """Configure analyzer for analyzing targets within a file/class.

        Args:
            file_node: target file
            options: options specific to the file
            active_type: must be the surrounding class to analyze method targets

        NOTE(review): the teardown after ``yield`` is not wrapped in
        ``finally``, so an exception during analysis leaves the analyzer
        state partially configured -- presumably treated as fatal upstream.
        """
        scope = self.scope
        self.options = options
        self.errors.set_file(file_node.path, file_node.fullname, scope=scope)
        self.cur_mod_node = file_node
        self.cur_mod_id = file_node.fullname
        scope.enter_file(self.cur_mod_id)
        self._is_stub_file = file_node.path.lower().endswith('.pyi')
        self._is_typeshed_stub_file = is_typeshed_file(file_node.path)
        self.globals = file_node.names
        self.tvar_scope = TypeVarLikeScope()
        self.named_tuple_analyzer = NamedTupleAnalyzer(options, self)
        self.typed_dict_analyzer = TypedDictAnalyzer(options, self, self.msg)
        self.enum_call_analyzer = EnumCallAnalyzer(options, self)
        self.newtype_analyzer = NewTypeAnalyzer(options, self, self.msg)
        # Counter that keeps track of references to undefined things potentially caused by
        # incomplete namespaces.
        self.num_incomplete_refs = 0
        if active_type:
            self.incomplete_type_stack.append(False)
            scope.enter_class(active_type)
            self.enter_class(active_type.defn.info)
            # Re-bind the class's type variables for method analysis.
            for tvar in active_type.defn.type_vars:
                self.tvar_scope.bind_existing(tvar)
        yield
        if active_type:
            scope.leave()
            self.leave_class()
            self.type = None
            self.incomplete_type_stack.pop()
        scope.leave()
        del self.options
#
# Functions
#
    def visit_func_def(self, defn: FuncDef) -> None:
        """Analyze a (possibly nested) function definition."""
        self.statement = defn
        # Visit default values because they may contain assignment expressions.
        for arg in defn.arguments:
            if arg.initializer:
                arg.initializer.accept(self)
        defn.is_conditional = self.block_depth[-1] > 0
        # Set full names even for those definitions that aren't added
        # to a symbol table. For example, for overload items.
        defn._fullname = self.qualified_name(defn.name)
        # We don't add module top-level functions to symbol tables
        # when we analyze their bodies in the second phase on analysis,
        # since they were added in the first phase. Nested functions
        # get always added, since they aren't separate targets.
        if not self.recurse_into_functions or len(self.function_stack) > 0:
            if not defn.is_decorated and not defn.is_overload:
                self.add_function_to_symbol_table(defn)
        if not self.recurse_into_functions:
            return
        with self.scope.function_scope(defn):
            self.analyze_func_def(defn)
    def analyze_func_def(self, defn: FuncDef) -> None:
        """Analyze a function's signature and body; may defer the target."""
        self.function_stack.append(defn)
        if defn.type:
            assert isinstance(defn.type, CallableType)
            self.update_function_type_variables(defn.type, defn)
        self.function_stack.pop()
        if self.is_class_scope():
            # Method definition
            assert self.type is not None
            defn.info = self.type
            if defn.type is not None and defn.name in ('__init__', '__init_subclass__'):
                assert isinstance(defn.type, CallableType)
                # These special methods implicitly return None even if
                # annotated otherwise via Any.
                if isinstance(get_proper_type(defn.type.ret_type), AnyType):
                    defn.type = defn.type.copy_modified(ret_type=NoneType())
            self.prepare_method_signature(defn, self.type)
        # Analyze function signature
        with self.tvar_scope_frame(self.tvar_scope.method_frame()):
            if defn.type:
                self.check_classvar_in_signature(defn.type)
                assert isinstance(defn.type, CallableType)
                # Signature must be analyzed in the surrounding scope so that
                # class-level imported names and type variables are in scope.
                analyzer = self.type_analyzer()
                tag = self.track_incomplete_refs()
                result = analyzer.visit_callable_type(defn.type, nested=False)
                # Don't store not ready types (including placeholders).
                if self.found_incomplete_ref(tag) or has_placeholder(result):
                    self.defer(defn)
                    return
                assert isinstance(result, ProperType)
                defn.type = result
                self.add_type_alias_deps(analyzer.aliases_used)
                self.check_function_signature(defn)
                if isinstance(defn, FuncDef):
                    assert isinstance(defn.type, CallableType)
                    defn.type = set_callable_name(defn.type, defn)
        self.analyze_arg_initializers(defn)
        self.analyze_function_body(defn)
        if (defn.is_coroutine and
                isinstance(defn.type, CallableType) and
                self.wrapped_coro_return_types.get(defn) != defn.type):
            if defn.is_async_generator:
                # Async generator types are handled elsewhere
                pass
            else:
                # A coroutine defined as `async def foo(...) -> T: ...`
                # has external return type `Coroutine[Any, Any, T]`.
                any_type = AnyType(TypeOfAny.special_form)
                ret_type = self.named_type_or_none('typing.Coroutine',
                                                   [any_type, any_type, defn.type.ret_type])
                assert ret_type is not None, "Internal error: typing.Coroutine not found"
                defn.type = defn.type.copy_modified(ret_type=ret_type)
                self.wrapped_coro_return_types[defn] = defn.type
    def prepare_method_signature(self, func: FuncDef, info: TypeInfo) -> None:
        """Check basic signature validity and tweak annotation of self/cls argument."""
        # Only non-static methods are special.
        functype = func.type
        if not func.is_static:
            # These dunders are implicitly class methods.
            if func.name in ['__init_subclass__', '__class_getitem__']:
                func.is_class = True
            if not func.arguments:
                self.fail('Method must have at least one argument', func)
            elif isinstance(functype, CallableType):
                self_type = get_proper_type(functype.arg_types[0])
                if isinstance(self_type, AnyType):
                    # Give an implicit first argument the enclosing class's
                    # instance type ('self'), or its type object for class
                    # methods and __new__ ('cls').
                    leading_type: Type = fill_typevars(info)
                    if func.is_class or func.name == '__new__':
                        leading_type = self.class_type(leading_type)
                    func.type = replace_implicit_first_type(functype, leading_type)
def set_original_def(self, previous: Optional[Node], new: Union[FuncDef, Decorator]) -> bool:
"""If 'new' conditionally redefine 'previous', set 'previous' as original
We reject straight redefinitions of functions, as they are usually
a programming error. For example:
def f(): ...
def f(): ... # Error: 'f' redefined
"""
if isinstance(new, Decorator):
new = new.func
if (
isinstance(previous, (FuncDef, Decorator))
and unnamed_function(new.name)
and unnamed_function(previous.name)
):
return True
if isinstance(previous, (FuncDef, Var, Decorator)) and new.is_conditional:
new.original_def = previous
return True
else:
return False
    def update_function_type_variables(self, fun_type: CallableType, defn: FuncItem) -> None:
        """Make any type variables in the signature of defn explicit.

        Update the signature of defn to contain type variable definitions
        if defn is generic.
        """
        with self.tvar_scope_frame(self.tvar_scope.method_frame()):
            a = self.type_analyzer()
            fun_type.variables = a.bind_function_type_variables(fun_type, defn)
    def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
        """Analyze an overloaded function definition."""
        self.statement = defn
        self.add_function_to_symbol_table(defn)
        if not self.recurse_into_functions:
            return
        # NB: Since _visit_overloaded_func_def will call accept on the
        # underlying FuncDefs, the function might get entered twice.
        # This is fine, though, because only the outermost function is
        # used to compute targets.
        with self.scope.function_scope(defn):
            self.analyze_overloaded_func_def(defn)
    def analyze_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
        """Analyze an @overload chain or a multi-part property definition."""
        # OverloadedFuncDef refers to any legitimate situation where you have
        # more than one declaration for the same function in a row. This occurs
        # with a @property with a setter or a deleter, and for a classic
        # @overload.
        defn._fullname = self.qualified_name(defn.name)
        # TODO: avoid modifying items.
        defn.items = defn.unanalyzed_items.copy()
        first_item = defn.items[0]
        first_item.is_overload = True
        first_item.accept(self)
        if isinstance(first_item, Decorator) and first_item.func.is_property:
            # This is a property.
            first_item.func.is_overload = True
            self.analyze_property_with_multi_part_definition(defn)
            typ = function_type(first_item.func, self.builtin_type('builtins.function'))
            assert isinstance(typ, CallableType)
            types = [typ]
        else:
            # This is a normal overload. Find the item signatures, the
            # implementation (if outside a stub), and any missing @overload
            # decorators.
            types, impl, non_overload_indexes = self.analyze_overload_sigs_and_impl(defn)
            defn.impl = impl
            if non_overload_indexes:
                self.handle_missing_overload_decorators(defn, non_overload_indexes,
                                                        some_overload_decorators=len(types) > 0)
            # If we found an implementation, remove it from the overload item list,
            # as it's special.
            if impl is not None:
                assert impl is defn.items[-1]
                defn.items = defn.items[:-1]
            elif not non_overload_indexes:
                self.handle_missing_overload_implementation(defn)
        if types:
            defn.type = Overloaded(types)
            defn.type.line = defn.line
        if not defn.items:
            # It was not a real overload after all, but function redefinition. We've
            # visited the redefinition(s) already.
            if not defn.impl:
                # For really broken overloads with no items and no implementation we need to keep
                # at least one item to hold basic information like function name.
                defn.impl = defn.unanalyzed_items[-1]
            return
        # We know this is an overload def. Infer properties and perform some checks.
        self.process_final_in_overload(defn)
        self.process_static_or_class_method_in_overload(defn)
    def analyze_overload_sigs_and_impl(
            self,
            defn: OverloadedFuncDef) -> Tuple[List[CallableType],
                                              Optional[OverloadPart],
                                              List[int]]:
        """Find overload signatures, the implementation, and items with missing @overload.

        Assume that the first was already analyzed. As a side effect:
        analyzes remaining items and updates 'is_overload' flags.
        """
        types = []
        non_overload_indexes = []
        impl: Optional[OverloadPart] = None
        for i, item in enumerate(defn.items):
            if i != 0:
                # Assume that the first item was already visited
                item.is_overload = True
                item.accept(self)
            # TODO: support decorated overloaded functions properly
            if isinstance(item, Decorator):
                callable = function_type(item.func, self.builtin_type('builtins.function'))
                assert isinstance(callable, CallableType)
                if not any(refers_to_fullname(dec, 'typing.overload')
                           for dec in item.decorators):
                    if i == len(defn.items) - 1 and not self.is_stub_file:
                        # Last item outside a stub is impl
                        impl = item
                    else:
                        # Oops it wasn't an overload after all. A clear error
                        # will vary based on where in the list it is, record
                        # that.
                        non_overload_indexes.append(i)
                else:
                    item.func.is_overload = True
                    types.append(callable)
            elif isinstance(item, FuncDef):
                # An undecorated function can only be the implementation.
                if i == len(defn.items) - 1 and not self.is_stub_file:
                    impl = item
                else:
                    non_overload_indexes.append(i)
        return types, impl, non_overload_indexes
    def handle_missing_overload_decorators(self,
                                           defn: OverloadedFuncDef,
                                           non_overload_indexes: List[int],
                                           some_overload_decorators: bool) -> None:
        """Generate errors for overload items without @overload.

        Side effect: remove non-overload items from defn.items.
        """
        if some_overload_decorators:
            # Some of them were overloads, but not all.
            for idx in non_overload_indexes:
                if self.is_stub_file:
                    self.fail("An implementation for an overloaded function "
                              "is not allowed in a stub file", defn.items[idx])
                else:
                    self.fail("The implementation for an overloaded function "
                              "must come last", defn.items[idx])
        else:
            # None of the items had @overload: treat the first as the real
            # definition and the rest as plain redefinitions.
            for idx in non_overload_indexes[1:]:
                self.name_already_defined(defn.name, defn.items[idx], defn.items[0])
            if defn.impl:
                self.name_already_defined(defn.name, defn.impl, defn.items[0])
        # Remove the non-overloads
        for idx in reversed(non_overload_indexes):
            del defn.items[idx]
def handle_missing_overload_implementation(self, defn: OverloadedFuncDef) -> None:
"""Generate error about missing overload implementation (only if needed)."""
if not self.is_stub_file:
if self.type and self.type.is_protocol and not self.is_func_scope():
# An overloaded protocol method doesn't need an implementation.
for item in defn.items:
if isinstance(item, Decorator):
item.func.is_abstract = True
else:
item.is_abstract = True
else:
self.fail(
"An overloaded function outside a stub file must have an implementation",
defn)
    def process_final_in_overload(self, defn: OverloadedFuncDef) -> None:
        """Detect the @final status of an overloaded function (and perform checks)."""
        # If the implementation is marked as @final (or the first overload in
        # stubs), then the whole overloaded definition if @final.
        if any(item.is_final for item in defn.items):
            # We anyway mark it as final because it was probably the intention.
            defn.is_final = True
            # Only show the error once per overload
            bad_final = next(ov for ov in defn.items if ov.is_final)
            if not self.is_stub_file:
                self.fail("@final should be applied only to overload implementation",
                          bad_final)
            elif any(item.is_final for item in defn.items[1:]):
                # In stubs @final is allowed, but only on the first overload item.
                bad_final = next(ov for ov in defn.items[1:] if ov.is_final)
                self.fail("In a stub file @final must be applied only to the first overload",
                          bad_final)
        if defn.impl is not None and defn.impl.is_final:
            defn.is_final = True
    def process_static_or_class_method_in_overload(self, defn: OverloadedFuncDef) -> None:
        """Require all overload items (and impl) to agree on static/class status."""
        class_status = []
        static_status = []
        for item in defn.items:
            if isinstance(item, Decorator):
                inner = item.func
            elif isinstance(item, FuncDef):
                inner = item
            else:
                assert False, "The 'item' variable is an unexpected type: {}".format(type(item))
            class_status.append(inner.is_class)
            static_status.append(inner.is_static)
        if defn.impl is not None:
            if isinstance(defn.impl, Decorator):
                inner = defn.impl.func
            elif isinstance(defn.impl, FuncDef):
                inner = defn.impl
            else:
                assert False, "Unexpected impl type: {}".format(type(defn.impl))
            class_status.append(inner.is_class)
            static_status.append(inner.is_static)
        # A mixed set means some parts were decorated and some were not.
        if len(set(class_status)) != 1:
            self.msg.overload_inconsistently_applies_decorator('classmethod', defn)
        elif len(set(static_status)) != 1:
            self.msg.overload_inconsistently_applies_decorator('staticmethod', defn)
        else:
            defn.is_class = class_status[0]
            defn.is_static = static_status[0]
    def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) -> None:
        """Analyze a property defined using multiple methods (e.g., using @x.setter).

        Assume that the first method (@property) has already been analyzed.
        """
        defn.is_property = True
        items = defn.items
        first_item = cast(Decorator, defn.items[0])
        deleted_items = []
        for i, item in enumerate(items[1:]):
            if isinstance(item, Decorator):
                if len(item.decorators) == 1:
                    node = item.decorators[0]
                    if isinstance(node, MemberExpr):
                        if node.name == 'setter':
                            # The first item represents the entire property.
                            first_item.var.is_settable_property = True
                            # Get abstractness from the original definition.
                            item.func.is_abstract = first_item.func.is_abstract
                else:
                    self.fail("Decorated property not supported", item)
                item.func.accept(self)
            else:
                self.fail('Unexpected definition for property "{}"'.format(first_item.func.name),
                          item)
                # Remember the bad item (offset by 1 since we enumerate items[1:]).
                deleted_items.append(i + 1)
        for i in reversed(deleted_items):
            del items[i]
    def add_function_to_symbol_table(self, func: Union[FuncDef, OverloadedFuncDef]) -> None:
        """Add a function definition to the enclosing symbol table."""
        if self.is_class_scope():
            assert self.type is not None
            func.info = self.type
        func._fullname = self.qualified_name(func.name)
        self.add_symbol(func.name, func, func)
    def analyze_arg_initializers(self, defn: FuncItem) -> None:
        """Analyze argument default value expressions."""
        with self.tvar_scope_frame(self.tvar_scope.method_frame()):
            # Analyze default arguments
            for arg in defn.arguments:
                if arg.initializer:
                    arg.initializer.accept(self)
    def analyze_function_body(self, defn: FuncItem) -> None:
        """Analyze a function body within a fresh local scope."""
        is_method = self.is_class_scope()
        with self.tvar_scope_frame(self.tvar_scope.method_frame()):
            # Bind the type variables again to visit the body.
            if defn.type:
                a = self.type_analyzer()
                a.bind_function_type_variables(cast(CallableType, defn.type), defn)
            self.function_stack.append(defn)
            self.enter(defn)
            for arg in defn.arguments:
                self.add_local(arg.variable, defn)
            # The first argument of a non-static, non-class method is like 'self'
            # (though the name could be different), having the enclosing class's
            # instance type.
            if is_method and not defn.is_static and not defn.is_class and defn.arguments:
                defn.arguments[0].variable.is_self = True
            defn.body.accept(self)
        self.leave()
        self.function_stack.pop()
def check_classvar_in_signature(self, typ: ProperType) -> None:
if isinstance(typ, Overloaded):
for t in typ.items(): # type: ProperType
self.check_classvar_in_signature(t)
return
if not isinstance(typ, CallableType):
return
for t in get_proper_types(typ.arg_types) + [get_proper_type(typ.ret_type)]:
if self.is_classvar(t):
self.fail_invalid_classvar(t)
# Show only one error per signature
break
def check_function_signature(self, fdef: FuncItem) -> None:
sig = fdef.type
assert isinstance(sig, CallableType)
if len(sig.arg_types) < len(fdef.arguments):
self.fail('Type signature has too few arguments', fdef)
# Add dummy Any arguments to prevent crashes later.
num_extra_anys = len(fdef.arguments) - len(sig.arg_types)
extra_anys = [AnyType(TypeOfAny.from_error)] * num_extra_anys
sig.arg_types.extend(extra_anys)
elif len(sig.arg_types) > len(fdef.arguments):
self.fail('Type signature has too many arguments', fdef, blocker=True)
    def visit_decorator(self, dec: Decorator) -> None:
        """Analyze a decorated function, special-casing well-known decorators."""
        self.statement = dec
        # TODO: better don't modify them at all.
        dec.decorators = dec.original_decorators.copy()
        dec.func.is_conditional = self.block_depth[-1] > 0
        if not dec.is_overload:
            self.add_symbol(dec.name, dec, dec)
        dec.func._fullname = self.qualified_name(dec.name)
        for d in dec.decorators:
            d.accept(self)
        removed: List[int] = []
        no_type_check = False
        for i, d in enumerate(dec.decorators):
            # A bunch of decorators are special cased here.
            if refers_to_fullname(d, 'abc.abstractmethod'):
                removed.append(i)
                dec.func.is_abstract = True
                self.check_decorated_function_is_method('abstractmethod', dec)
            elif (refers_to_fullname(d, 'asyncio.coroutines.coroutine') or
                  refers_to_fullname(d, 'types.coroutine')):
                removed.append(i)
                dec.func.is_awaitable_coroutine = True
            elif refers_to_fullname(d, 'builtins.staticmethod'):
                removed.append(i)
                dec.func.is_static = True
                dec.var.is_staticmethod = True
                self.check_decorated_function_is_method('staticmethod', dec)
            elif refers_to_fullname(d, 'builtins.classmethod'):
                removed.append(i)
                dec.func.is_class = True
                dec.var.is_classmethod = True
                self.check_decorated_function_is_method('classmethod', dec)
            elif (refers_to_fullname(d, 'builtins.property') or
                  refers_to_fullname(d, 'abc.abstractproperty') or
                  refers_to_fullname(d, 'functools.cached_property')):
                removed.append(i)
                dec.func.is_property = True
                dec.var.is_property = True
                if refers_to_fullname(d, 'abc.abstractproperty'):
                    dec.func.is_abstract = True
                elif refers_to_fullname(d, 'functools.cached_property'):
                    dec.var.is_settable_property = True
                self.check_decorated_function_is_method('property', dec)
                if len(dec.func.arguments) > 1:
                    self.fail('Too many arguments', dec.func)
            elif refers_to_fullname(d, 'typing.no_type_check'):
                dec.var.type = AnyType(TypeOfAny.special_form)
                no_type_check = True
            elif (refers_to_fullname(d, 'typing.final') or
                  refers_to_fullname(d, 'typing_extensions.final')):
                if self.is_class_scope():
                    assert self.type is not None, "No type set at class scope"
                    if self.type.is_protocol:
                        self.msg.protocol_members_cant_be_final(d)
                    else:
                        dec.func.is_final = True
                        dec.var.is_final = True
                    removed.append(i)
                else:
                    self.fail("@final cannot be used with non-method functions", d)
        # Delete the recognized decorators (reversed so indexes stay valid).
        for i in reversed(removed):
            del dec.decorators[i]
        if (not dec.is_overload or dec.var.is_property) and self.type:
            dec.var.info = self.type
            dec.var.is_initialized_in_class = True
        if not no_type_check and self.recurse_into_functions:
            dec.func.accept(self)
        if dec.decorators and dec.var.is_property:
            self.fail('Decorated property not supported', dec)
    def check_decorated_function_is_method(self, decorator: str,
                                           context: Context) -> None:
        """Fail if a method-only decorator is used outside a class body."""
        if not self.type or self.is_func_scope():
            self.fail('"%s" used with a non-method' % decorator, context)
#
# Classes
#
    def visit_class_def(self, defn: ClassDef) -> None:
        """Analyze a class definition within a fresh type variable scope."""
        self.statement = defn
        # The class namespace is incomplete until the TypeInfo exists.
        self.incomplete_type_stack.append(not defn.info)
        with self.tvar_scope_frame(self.tvar_scope.class_frame()):
            self.analyze_class(defn)
        self.incomplete_type_stack.pop()
    def analyze_class(self, defn: ClassDef) -> None:
        """Analyze a class: bases, type variables, metaclass, decorators, body."""
        fullname = self.qualified_name(defn.name)
        if not defn.info and not self.is_core_builtin_class(defn):
            # Add placeholder so that self-references in base classes can be
            # resolved. We don't want this to cause a deferral, since if there
            # are no incomplete references, we'll replace this with a TypeInfo
            # before returning.
            placeholder = PlaceholderNode(fullname, defn, defn.line, becomes_typeinfo=True)
            self.add_symbol(defn.name, placeholder, defn, can_defer=False)
        tag = self.track_incomplete_refs()
        # Restore base classes after previous iteration (things like Generic[T] might be removed).
        defn.base_type_exprs.extend(defn.removed_base_type_exprs)
        defn.removed_base_type_exprs.clear()
        self.update_metaclass(defn)
        bases = defn.base_type_exprs
        bases, tvar_defs, is_protocol = self.clean_up_bases_and_infer_type_variables(defn, bases,
                                                                                    context=defn)
        for tvd in tvar_defs:
            if any(has_placeholder(t) for t in [tvd.upper_bound] + tvd.values):
                # Some type variable bounds or values are not ready, we need
                # to re-analyze this class.
                self.defer()
        self.analyze_class_keywords(defn)
        result = self.analyze_base_classes(bases)
        if result is None or self.found_incomplete_ref(tag):
            # Something was incomplete. Defer current target.
            self.mark_incomplete(defn.name, defn)
            return
        base_types, base_error = result
        if any(isinstance(base, PlaceholderType) for base, _ in base_types):
            # We need to know the TypeInfo of each base to construct the MRO. Placeholder types
            # are okay in nested positions, since they can't affect the MRO.
            self.mark_incomplete(defn.name, defn)
            return
        is_typeddict, info = self.typed_dict_analyzer.analyze_typeddict_classdef(defn)
        if is_typeddict:
            for decorator in defn.decorators:
                decorator.accept(self)
                if isinstance(decorator, RefExpr):
                    if decorator.fullname in ('typing.final',
                                              'typing_extensions.final'):
                        self.fail("@final cannot be used with TypedDict", decorator)
            if info is None:
                self.mark_incomplete(defn.name, defn)
            else:
                self.prepare_class_def(defn, info)
            return
        if self.analyze_namedtuple_classdef(defn):
            return
        # Create TypeInfo for class now that base classes and the MRO can be calculated.
        self.prepare_class_def(defn)
        defn.type_vars = tvar_defs
        defn.info.type_vars = [tvar.name for tvar in tvar_defs]
        if base_error:
            defn.info.fallback_to_any = True
        with self.scope.class_scope(defn.info):
            self.configure_base_classes(defn, base_types)
            defn.info.is_protocol = is_protocol
            self.analyze_metaclass(defn)
            defn.info.runtime_protocol = False
            for decorator in defn.decorators:
                self.analyze_class_decorator(defn, decorator)
            self.analyze_class_body_common(defn)
def is_core_builtin_class(self, defn: ClassDef) -> bool:
return self.cur_mod_id == 'builtins' and defn.name in CORE_BUILTIN_CLASSES
    def analyze_class_body_common(self, defn: ClassDef) -> None:
        """Parts of class body analysis that are common to all kinds of class defs."""
        # Push class scope, analyze every statement in the body, give plugins
        # a chance to refine the resulting TypeInfo, then pop the scope again.
        self.enter_class(defn.info)
        defn.defs.accept(self)
        self.apply_class_plugin_hooks(defn)
        self.leave_class()
def analyze_namedtuple_classdef(self, defn: ClassDef) -> bool:
"""Check if this class can define a named tuple."""
if defn.info and defn.info.is_named_tuple:
# Don't reprocess everything. We just need to process methods defined
# in the named tuple class body.
is_named_tuple, info = True, defn.info # type: bool, Optional[TypeInfo]
else:
is_named_tuple, info = self.named_tuple_analyzer.analyze_namedtuple_classdef(
defn, self.is_stub_file)
if is_named_tuple:
if info is None:
self.mark_incomplete(defn.name, defn)
else:
self.prepare_class_def(defn, info)
with self.scope.class_scope(defn.info):
with self.named_tuple_analyzer.save_namedtuple_body(info):
self.analyze_class_body_common(defn)
return True
return False
def apply_class_plugin_hooks(self, defn: ClassDef) -> None:
"""Apply a plugin hook that may infer a more precise definition for a class."""
def get_fullname(expr: Expression) -> Optional[str]:
if isinstance(expr, CallExpr):
return get_fullname(expr.callee)
elif isinstance(expr, IndexExpr):
return get_fullname(expr.base)
elif isinstance(expr, RefExpr):
if expr.fullname:
return expr.fullname
# If we don't have a fullname look it up. This happens because base classes are
# analyzed in a different manner (see exprtotype.py) and therefore those AST
# nodes will not have full names.
sym = self.lookup_type_node(expr)
if sym:
return sym.fullname
return None
for decorator in defn.decorators:
decorator_name = get_fullname(decorator)
if decorator_name:
hook = self.plugin.get_class_decorator_hook(decorator_name)
if hook:
hook(ClassDefContext(defn, decorator, self))
if defn.metaclass:
metaclass_name = get_fullname(defn.metaclass)
if metaclass_name:
hook = self.plugin.get_metaclass_hook(metaclass_name)
if hook:
hook(ClassDefContext(defn, defn.metaclass, self))
for base_expr in defn.base_type_exprs:
base_name = get_fullname(base_expr)
if base_name:
hook = self.plugin.get_base_class_hook(base_name)
if hook:
hook(ClassDefContext(defn, base_expr, self))
def analyze_class_keywords(self, defn: ClassDef) -> None:
for value in defn.keywords.values():
value.accept(self)
    def enter_class(self, info: TypeInfo) -> None:
        """Push analyzer state for analyzing a class body.

        Must be paired with a later leave_class() call, which pops every
        piece of state pushed here.
        """
        # Remember previous active class
        self.type_stack.append(self.type)
        self.locals.append(None)  # Add class scope
        self.is_comprehension_stack.append(False)
        self.block_depth.append(-1)  # The class body increments this to 0
        self.type = info
        self.missing_names.append(set())
    def leave_class(self) -> None:
        """ Restore analyzer state. """
        # Pop everything pushed by enter_class() and reinstate the previously
        # active class (or None when returning to module level).
        self.block_depth.pop()
        self.locals.pop()
        self.is_comprehension_stack.pop()
        self.type = self.type_stack.pop()
        self.missing_names.pop()
def analyze_class_decorator(self, defn: ClassDef, decorator: Expression) -> None:
decorator.accept(self)
if isinstance(decorator, RefExpr):
if decorator.fullname in RUNTIME_PROTOCOL_DECOS:
if defn.info.is_protocol:
defn.info.runtime_protocol = True
else:
self.fail('@runtime_checkable can only be used with protocol classes',
defn)
elif decorator.fullname in ('typing.final',
'typing_extensions.final'):
defn.info.is_final = True
    def clean_up_bases_and_infer_type_variables(
            self,
            defn: ClassDef,
            base_type_exprs: List[Expression],
            context: Context) -> Tuple[List[Expression],
                                       List[TypeVarDef],
                                       bool]:
        """Remove extra base classes such as Generic and infer type vars.

        For example, consider this class:

          class Foo(Bar, Generic[T]): ...

        Now we will remove Generic[T] from bases of Foo and infer that the
        type variable 'T' is a type argument of Foo.

        Note that this is performed *before* semantic analysis.

        Returns (remaining base expressions, inferred type variables, is protocol).
        """
        # Indices into base_type_exprs of bases to strip (Generic[...],
        # Protocol[...], and bare Protocol).
        removed: List[int] = []
        declared_tvars: TypeVarLikeList = []
        is_protocol = False
        for i, base_expr in enumerate(base_type_exprs):
            self.analyze_type_expr(base_expr)
            try:
                base = self.expr_to_unanalyzed_type(base_expr)
            except TypeTranslationError:
                # This error will be caught later.
                continue
            result = self.analyze_class_typevar_declaration(base)
            if result is not None:
                if declared_tvars:
                    self.fail('Only single Generic[...] or Protocol[...] can be in bases', context)
                removed.append(i)
                tvars = result[0]
                is_protocol |= result[1]
                declared_tvars.extend(tvars)
            if isinstance(base, UnboundType):
                sym = self.lookup_qualified(base.name, base)
                if sym is not None and sym.node is not None:
                    if (sym.node.fullname in ('typing.Protocol', 'typing_extensions.Protocol') and
                            i not in removed):
                        # also remove bare 'Protocol' bases
                        removed.append(i)
                        is_protocol = True
        # Type variables appearing in any *remaining* base are implicitly
        # declared; they must agree with an explicit Generic/Protocol list.
        all_tvars = self.get_all_bases_tvars(base_type_exprs, removed)
        if declared_tvars:
            if len(remove_dups(declared_tvars)) < len(declared_tvars):
                self.fail("Duplicate type variables in Generic[...] or Protocol[...]", context)
            declared_tvars = remove_dups(declared_tvars)
            if not set(all_tvars).issubset(set(declared_tvars)):
                self.fail("If Generic[...] or Protocol[...] is present"
                          " it should list all type variables", context)
                # In case of error, Generic tvars will go first
                declared_tvars = remove_dups(declared_tvars + all_tvars)
        else:
            declared_tvars = all_tvars
        # Delete in reverse order so earlier indices stay valid.
        for i in reversed(removed):
            # We need to actually remove the base class expressions like Generic[T],
            # mostly because otherwise they will create spurious dependencies in fine
            # grained incremental mode.
            defn.removed_base_type_exprs.append(defn.base_type_exprs[i])
            del base_type_exprs[i]
        tvar_defs: List[TypeVarDef] = []
        for name, tvar_expr in declared_tvars:
            tvar_def = self.tvar_scope.bind_new(name, tvar_expr)
            assert isinstance(tvar_def, TypeVarDef), (
                "mypy does not currently support ParamSpec use in generic classes"
            )
            tvar_defs.append(tvar_def)
        return base_type_exprs, tvar_defs, is_protocol
def analyze_class_typevar_declaration(
self,
base: Type
) -> Optional[Tuple[TypeVarLikeList, bool]]:
"""Analyze type variables declared using Generic[...] or Protocol[...].
Args:
base: Non-analyzed base class
Return None if the base class does not declare type variables. Otherwise,
return the type variables.
"""
if not isinstance(base, UnboundType):
return None
unbound = base
sym = self.lookup_qualified(unbound.name, unbound)
if sym is None or sym.node is None:
return None
if (sym.node.fullname == 'typing.Generic' or
sym.node.fullname == 'typing.Protocol' and base.args or
sym.node.fullname == 'typing_extensions.Protocol' and base.args):
is_proto = sym.node.fullname != 'typing.Generic'
tvars: TypeVarLikeList = []
for arg in unbound.args:
tag = self.track_incomplete_refs()
tvar = self.analyze_unbound_tvar(arg)
if tvar:
tvars.append(tvar)
elif not self.found_incomplete_ref(tag):
self.fail('Free type variable expected in %s[...]' %
sym.node.name, base)
return tvars, is_proto
return None
def analyze_unbound_tvar(self, t: Type) -> Optional[Tuple[str, TypeVarExpr]]:
if not isinstance(t, UnboundType):
return None
unbound = t
sym = self.lookup_qualified(unbound.name, unbound)
if sym and isinstance(sym.node, PlaceholderNode):
self.record_incomplete_ref()
if sym is None or not isinstance(sym.node, TypeVarExpr):
return None
elif sym.fullname and not self.tvar_scope.allow_binding(sym.fullname):
# It's bound by our type variable scope
return None
else:
assert isinstance(sym.node, TypeVarExpr)
return unbound.name, sym.node
def get_all_bases_tvars(self,
base_type_exprs: List[Expression],
removed: List[int]) -> TypeVarLikeList:
"""Return all type variable references in bases."""
tvars: TypeVarLikeList = []
for i, base_expr in enumerate(base_type_exprs):
if i not in removed:
try:
base = self.expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
# This error will be caught later.
continue
base_tvars = base.accept(TypeVarLikeQuery(self.lookup_qualified, self.tvar_scope))
tvars.extend(base_tvars)
return remove_dups(tvars)
    def prepare_class_def(self, defn: ClassDef, info: Optional[TypeInfo] = None) -> None:
        """Prepare for the analysis of a class definition.

        Create an empty TypeInfo and store it in a symbol table, or if the 'info'
        argument is provided, store it instead (used for magic type definitions).
        """
        if not defn.info:
            defn.fullname = self.qualified_name(defn.name)
            # TODO: Nested classes
            info = info or self.make_empty_type_info(defn)
            defn.info = info
            info.defn = defn
            if not self.is_func_scope():
                info._fullname = self.qualified_name(defn.name)
            else:
                # Function-local class: the mangled name is assigned below.
                info._fullname = info.name
        self.add_symbol(defn.name, defn.info, defn)
        if self.is_nested_within_func_scope():
            # We need to preserve local classes, let's store them
            # in globals under mangled unique names
            #
            # TODO: Putting local classes into globals breaks assumptions in fine-grained
            #       incremental mode and we should avoid it. In general, this logic is too
            #       ad-hoc and needs to be removed/refactored.
            if '@' not in defn.info._fullname:
                # Mangle with the definition line number so different local
                # classes with the same name get distinct global names.
                local_name = defn.info.name + '@' + str(defn.line)
                if defn.info.is_named_tuple:
                    # Module is already correctly set in _fullname for named tuples.
                    defn.info._fullname += '@' + str(defn.line)
                else:
                    defn.info._fullname = self.cur_mod_id + '.' + local_name
            else:
                # Preserve name from previous fine-grained incremental run.
                local_name = defn.info.name
            defn.fullname = defn.info._fullname
            self.globals[local_name] = SymbolTableNode(GDEF, defn.info)
def make_empty_type_info(self, defn: ClassDef) -> TypeInfo:
if (self.is_module_scope()
and self.cur_mod_id == 'builtins'
and defn.name in CORE_BUILTIN_CLASSES):
# Special case core built-in classes. A TypeInfo was already
# created for it before semantic analysis, but with a dummy
# ClassDef. Patch the real ClassDef object.
info = self.globals[defn.name].node
assert isinstance(info, TypeInfo)
else:
info = TypeInfo(SymbolTable(), defn, self.cur_mod_id)
info.set_line(defn)
return info
def get_name_repr_of_expr(self, expr: Expression) -> Optional[str]:
"""Try finding a short simplified textual representation of a base class expression."""
if isinstance(expr, NameExpr):
return expr.name
if isinstance(expr, MemberExpr):
return get_member_expr_fullname(expr)
if isinstance(expr, IndexExpr):
return self.get_name_repr_of_expr(expr.base)
if isinstance(expr, CallExpr):
return self.get_name_repr_of_expr(expr.callee)
return None
def analyze_base_classes(
self,
base_type_exprs: List[Expression]) -> Optional[Tuple[List[Tuple[ProperType,
Expression]],
bool]]:
"""Analyze base class types.
Return None if some definition was incomplete. Otherwise, return a tuple
with these items:
* List of (analyzed type, original expression) tuples
* Boolean indicating whether one of the bases had a semantic analysis error
"""
is_error = False
bases = []
for base_expr in base_type_exprs:
if (isinstance(base_expr, RefExpr) and
base_expr.fullname in ('typing.NamedTuple',) + TPDICT_NAMES):
# Ignore magic bases for now.
continue
try:
base = self.expr_to_analyzed_type(base_expr, allow_placeholder=True)
except TypeTranslationError:
name = self.get_name_repr_of_expr(base_expr)
if isinstance(base_expr, CallExpr):
msg = 'Unsupported dynamic base class'
else:
msg = 'Invalid base class'
if name:
msg += ' "{}"'.format(name)
self.fail(msg, base_expr)
is_error = True
continue
if base is None:
return None
base = get_proper_type(base)
bases.append((base, base_expr))
return bases, is_error
    def configure_base_classes(self,
                               defn: ClassDef,
                               bases: List[Tuple[ProperType, Expression]]) -> None:
        """Set up base classes.

        This computes several attributes on the corresponding TypeInfo defn.info
        related to the base classes: defn.info.bases, defn.info.mro, and
        miscellaneous others (at least tuple_type, fallback_to_any, and is_enum.)
        """
        base_types: List[Instance] = []
        info = defn.info
        info.tuple_type = None
        for base, base_expr in bases:
            if isinstance(base, TupleType):
                # Tuple bases contribute their fallback instance to the MRO.
                actual_base = self.configure_tuple_base_class(defn, base, base_expr)
                base_types.append(actual_base)
            elif isinstance(base, Instance):
                if base.type.is_newtype:
                    self.fail('Cannot subclass "NewType"', defn)
                base_types.append(base)
            elif isinstance(base, AnyType):
                if self.options.disallow_subclassing_any:
                    if isinstance(base_expr, (NameExpr, MemberExpr)):
                        msg = 'Class cannot subclass "{}" (has type "Any")'.format(base_expr.name)
                    else:
                        msg = 'Class cannot subclass value of type "Any"'
                    self.fail(msg, base_expr)
                info.fallback_to_any = True
            else:
                # Any other type is not a valid base class.
                msg = 'Invalid base class'
                name = self.get_name_repr_of_expr(base_expr)
                if name:
                    msg += ' "{}"'.format(name)
                self.fail(msg, base_expr)
                info.fallback_to_any = True
            if self.options.disallow_any_unimported and has_any_from_unimported_type(base):
                if isinstance(base_expr, (NameExpr, MemberExpr)):
                    prefix = "Base type {}".format(base_expr.name)
                else:
                    prefix = "Base type"
                self.msg.unimported_type_becomes_any(prefix, base, base_expr)
            check_for_explicit_any(base, self.options, self.is_typeshed_stub_file, self.msg,
                                   context=base_expr)
        # Add 'object' as implicit base if there is no other base class.
        if not base_types and defn.fullname != 'builtins.object':
            base_types.append(self.object_type())
        info.bases = base_types
        # Calculate the MRO.
        if not self.verify_base_classes(defn):
            self.set_dummy_mro(defn.info)
            return
        self.calculate_class_mro(defn, self.object_type)
def configure_tuple_base_class(self,
defn: ClassDef,
base: TupleType,
base_expr: Expression) -> Instance:
info = defn.info
# There may be an existing valid tuple type from previous semanal iterations.
# Use equality to check if it is the case.
if info.tuple_type and info.tuple_type != base:
self.fail("Class has two incompatible bases derived from tuple", defn)
defn.has_incompatible_baseclass = True
info.tuple_type = base
if isinstance(base_expr, CallExpr):
defn.analyzed = NamedTupleExpr(base.partial_fallback.type)
defn.analyzed.line = defn.line
defn.analyzed.column = defn.column
if base.partial_fallback.type.fullname == 'builtins.tuple':
# Fallback can only be safely calculated after semantic analysis, since base
# classes may be incomplete. Postpone the calculation.
self.schedule_patch(PRIORITY_FALLBACKS, lambda: calculate_tuple_fallback(base))
return base.partial_fallback
def set_dummy_mro(self, info: TypeInfo) -> None:
# Give it an MRO consisting of just the class itself and object.
info.mro = [info, self.object_type().type]
info.bad_mro = True
def calculate_class_mro(self, defn: ClassDef,
obj_type: Optional[Callable[[], Instance]] = None) -> None:
"""Calculate method resolution order for a class.
`obj_type` may be omitted in the third pass when all classes are already analyzed.
It exists just to fill in empty base class list during second pass in case of
an import cycle.
"""
try:
calculate_mro(defn.info, obj_type)
except MroError:
self.fail('Cannot determine consistent method resolution '
'order (MRO) for "%s"' % defn.name, defn)
self.set_dummy_mro(defn.info)
# Allow plugins to alter the MRO to handle the fact that `def mro()`
# on metaclasses permits MRO rewriting.
if defn.fullname:
hook = self.plugin.get_customize_class_mro_hook(defn.fullname)
if hook:
hook(ClassDefContext(defn, FakeExpression(), self))
    def update_metaclass(self, defn: ClassDef) -> None:
        """Lookup for special metaclass declarations, and update defn fields accordingly.

        * __metaclass__ attribute in Python 2
        * six.with_metaclass(M, B1, B2, ...)
        * @six.add_metaclass(M)
        * future.utils.with_metaclass(M, B1, B2, ...)
        * past.utils.with_metaclass(M, B1, B2, ...)
        """
        # Look for "__metaclass__ = <metaclass>" in Python 2
        python2_meta_expr: Optional[Expression] = None
        if self.options.python_version[0] == 2:
            for body_node in defn.defs.body:
                if isinstance(body_node, ClassDef) and body_node.name == "__metaclass__":
                    self.fail("Metaclasses defined as inner classes are not supported", body_node)
                    break
                elif isinstance(body_node, AssignmentStmt) and len(body_node.lvalues) == 1:
                    lvalue = body_node.lvalues[0]
                    if isinstance(lvalue, NameExpr) and lvalue.name == "__metaclass__":
                        python2_meta_expr = body_node.rvalue
        # Look for six.with_metaclass(M, B1, B2, ...)
        with_meta_expr: Optional[Expression] = None
        if len(defn.base_type_exprs) == 1:
            base_expr = defn.base_type_exprs[0]
            if isinstance(base_expr, CallExpr) and isinstance(base_expr.callee, RefExpr):
                base_expr.accept(self)
                if (base_expr.callee.fullname in {'six.with_metaclass',
                                                  'future.utils.with_metaclass',
                                                  'past.utils.with_metaclass'}
                        and len(base_expr.args) >= 1
                        and all(kind == ARG_POS for kind in base_expr.arg_kinds)):
                    with_meta_expr = base_expr.args[0]
                    # The remaining call arguments become the real base classes.
                    defn.base_type_exprs = base_expr.args[1:]
        # Look for @six.add_metaclass(M)
        add_meta_expr: Optional[Expression] = None
        for dec_expr in defn.decorators:
            if isinstance(dec_expr, CallExpr) and isinstance(dec_expr.callee, RefExpr):
                dec_expr.callee.accept(self)
                if (dec_expr.callee.fullname == 'six.add_metaclass'
                        and len(dec_expr.args) == 1
                        and dec_expr.arg_kinds[0] == ARG_POS):
                    add_meta_expr = dec_expr.args[0]
                    break
        # At most one source may declare a metaclass; the set discards the
        # unused (None) entries so duplicates of the same expr can't occur.
        metas = {defn.metaclass, python2_meta_expr, with_meta_expr, add_meta_expr} - {None}
        if len(metas) == 0:
            return
        if len(metas) > 1:
            self.fail("Multiple metaclass definitions", defn)
            return
        defn.metaclass = metas.pop()
def verify_base_classes(self, defn: ClassDef) -> bool:
info = defn.info
cycle = False
for base in info.bases:
baseinfo = base.type
if self.is_base_class(info, baseinfo):
self.fail('Cycle in inheritance hierarchy', defn)
cycle = True
if baseinfo.fullname == 'builtins.bool':
self.fail('"%s" is not a valid base class' %
baseinfo.name, defn, blocker=True)
return False
dup = find_duplicate(info.direct_base_classes())
if dup:
self.fail('Duplicate base class "%s"' % dup.name, defn, blocker=True)
return False
return not cycle
def is_base_class(self, t: TypeInfo, s: TypeInfo) -> bool:
"""Determine if t is a base class of s (but do not use mro)."""
# Search the base class graph for t, starting from s.
worklist = [s]
visited = {s}
while worklist:
nxt = worklist.pop()
if nxt == t:
return True
for base in nxt.bases:
if base.type not in visited:
worklist.append(base.type)
visited.add(base.type)
return False
    def analyze_metaclass(self, defn: ClassDef) -> None:
        """Resolve a declared metaclass and compute defn.info.metaclass_type."""
        if defn.metaclass:
            metaclass_name = None
            if isinstance(defn.metaclass, NameExpr):
                metaclass_name = defn.metaclass.name
            elif isinstance(defn.metaclass, MemberExpr):
                metaclass_name = get_member_expr_fullname(defn.metaclass)
            if metaclass_name is None:
                self.fail('Dynamic metaclass not supported for "%s"' % defn.name, defn.metaclass)
                return
            sym = self.lookup_qualified(metaclass_name, defn.metaclass)
            if sym is None:
                # Probably a name error - it is already handled elsewhere
                return
            if isinstance(sym.node, Var) and isinstance(get_proper_type(sym.node.type), AnyType):
                # 'Any' metaclass -- just ignore it.
                #
                # TODO: A better approach would be to record this information
                #       and assume that the type object supports arbitrary
                #       attributes, similar to an 'Any' base class.
                return
            if isinstance(sym.node, PlaceholderNode):
                # The metaclass is not ready yet; re-analyze this class later.
                self.defer(defn)
                return
            if not isinstance(sym.node, TypeInfo) or sym.node.tuple_type is not None:
                self.fail('Invalid metaclass "%s"' % metaclass_name, defn.metaclass)
                return
            if not sym.node.is_metaclass():
                self.fail('Metaclasses not inheriting from "type" are not supported',
                          defn.metaclass)
                return
            inst = fill_typevars(sym.node)
            assert isinstance(inst, Instance)
            defn.info.declared_metaclass = inst
        defn.info.metaclass_type = defn.info.calculate_metaclass_type()
        if any(info.is_protocol for info in defn.info.mro):
            if (not defn.info.metaclass_type or
                    defn.info.metaclass_type.type.fullname == 'builtins.type'):
                # All protocols and their subclasses have ABCMeta metaclass by default.
                # TODO: add a metaclass conflict check if there is another metaclass.
                abc_meta = self.named_type_or_none('abc.ABCMeta', [])
                if abc_meta is not None:  # May be None in tests with incomplete lib-stub.
                    defn.info.metaclass_type = abc_meta
        if defn.info.metaclass_type is None:
            # Inconsistency may happen due to multiple baseclasses even in classes that
            # do not declare explicit metaclass, but it's harder to catch at this stage
            if defn.metaclass is not None:
                self.fail('Inconsistent metaclass structure for "%s"' % defn.name, defn)
        else:
            if defn.info.metaclass_type.type.has_base('enum.EnumMeta'):
                defn.info.is_enum = True
                if defn.type_vars:
                    self.fail("Enum class cannot be generic", defn)
#
# Imports
#
def visit_import(self, i: Import) -> None:
self.statement = i
for id, as_id in i.ids:
# Modules imported in a stub file without using 'import X as X' won't get exported
# When implicit re-exporting is disabled, we have the same behavior as stubs.
use_implicit_reexport = not self.is_stub_file and self.options.implicit_reexport
if as_id is not None:
base_id = id
imported_id = as_id
module_public = use_implicit_reexport or id.split(".")[-1] == as_id
else:
base_id = id.split('.')[0]
imported_id = base_id
module_public = use_implicit_reexport
self.add_module_symbol(base_id, imported_id, context=i, module_public=module_public,
module_hidden=not module_public)
    def visit_import_from(self, imp: ImportFrom) -> None:
        """Analyze a 'from m import x [as y]' statement, name by name."""
        self.statement = imp
        module_id = self.correct_relative_import(imp)
        module = self.modules.get(module_id)
        for id, as_id in imp.names:
            fullname = module_id + '.' + id
            self.set_future_import_flags(fullname)
            if module is None:
                node = None
            elif module_id == self.cur_mod_id and fullname in self.modules:
                # Submodule takes precedence over definition in surround package, for
                # compatibility with runtime semantics in typical use cases. This
                # could more precisely model runtime semantics by taking into account
                # the line number beyond which the local definition should take
                # precedence, but doesn't seem to be important in most use cases.
                node = SymbolTableNode(GDEF, self.modules[fullname])
            else:
                if id == as_id == '__all__' and module_id in self.export_map:
                    # Propagate the source module's __all__ to this module.
                    self.all_exports[:] = self.export_map[module_id]
                node = module.names.get(id)
            missing_submodule = False
            imported_id = as_id or id
            # If the module does not contain a symbol with the name 'id',
            # try checking if it's a module instead.
            if not node:
                mod = self.modules.get(fullname)
                if mod is not None:
                    kind = self.current_symbol_kind()
                    node = SymbolTableNode(kind, mod)
                elif fullname in self.missing_modules:
                    missing_submodule = True
            # If it is still not resolved, check for a module level __getattr__
            if (module and not node and (module.is_stub or self.options.python_version >= (3, 7))
                    and '__getattr__' in module.names):
                # We store the fullname of the original definition so that we can
                # detect whether two imported names refer to the same thing.
                fullname = module_id + '.' + id
                gvar = self.create_getattr_var(module.names['__getattr__'], imported_id, fullname)
                if gvar:
                    self.add_symbol(imported_id, gvar, imp)
                    continue
            # Modules imported in a stub file without using 'from Y import X as X' will
            # not get exported.
            # When implicit re-exporting is disabled, we have the same behavior as stubs.
            use_implicit_reexport = not self.is_stub_file and self.options.implicit_reexport
            module_public = use_implicit_reexport or (as_id is not None and id == as_id)
            if node and not node.module_hidden:
                self.process_imported_symbol(
                    node, module_id, id, imported_id, fullname, module_public, context=imp
                )
            elif module and not missing_submodule:
                # Target module exists but the imported name is missing or hidden.
                self.report_missing_module_attribute(
                    module_id, id, imported_id, module_public=module_public,
                    module_hidden=not module_public, context=imp
                )
            else:
                # Import of a missing (sub)module.
                self.add_unknown_imported_symbol(
                    imported_id, imp, target_name=fullname, module_public=module_public,
                    module_hidden=not module_public
                )
    def process_imported_symbol(self,
                                node: SymbolTableNode,
                                module_id: str,
                                id: str,
                                imported_id: str,
                                fullname: str,
                                module_public: bool,
                                context: ImportBase) -> None:
        """Bind a resolved 'from module_id import id [as imported_id]' symbol locally."""
        module_hidden = not module_public and fullname not in self.modules
        if isinstance(node.node, PlaceholderNode):
            if self.final_iteration:
                # No more chances for the placeholder to resolve: report it.
                self.report_missing_module_attribute(
                    module_id, id, imported_id, module_public=module_public,
                    module_hidden=module_hidden, context=context
                )
                return
            else:
                # This might become a type.
                self.mark_incomplete(imported_id, node.node,
                                     module_public=module_public,
                                     module_hidden=module_hidden,
                                     becomes_typeinfo=True)
        existing_symbol = self.globals.get(imported_id)
        if (existing_symbol and not isinstance(existing_symbol.node, PlaceholderNode) and
                not isinstance(node.node, PlaceholderNode)):
            # Import can redefine a variable. They get special treatment.
            if self.process_import_over_existing_name(
                    imported_id, existing_symbol, node, context):
                return
        if existing_symbol and isinstance(node.node, PlaceholderNode):
            # Imports are special, some redefinitions are allowed, so wait until
            # we know what is the new symbol node.
            return
        # NOTE: we take the original node even for final `Var`s. This is to support
        # a common pattern when constants are re-exported (same applies to import *).
        self.add_imported_symbol(imported_id, node, context,
                                 module_public=module_public,
                                 module_hidden=module_hidden)
def report_missing_module_attribute(
self, import_id: str, source_id: str, imported_id: str, module_public: bool,
module_hidden: bool, context: Node
) -> None:
# Missing attribute.
if self.is_incomplete_namespace(import_id):
# We don't know whether the name will be there, since the namespace
# is incomplete. Defer the current target.
self.mark_incomplete(imported_id, context)
return
message = 'Module "{}" has no attribute "{}"'.format(import_id, source_id)
# Suggest alternatives, if any match is found.
module = self.modules.get(import_id)
if module:
if not self.options.implicit_reexport and source_id in module.names.keys():
message = ('Module "{}" does not explicitly export attribute "{}"'
'; implicit reexport disabled'.format(import_id, source_id))
else:
alternatives = set(module.names.keys()).difference({source_id})
matches = best_matches(source_id, alternatives)[:3]
if matches:
suggestion = "; maybe {}?".format(pretty_seq(matches, "or"))
message += "{}".format(suggestion)
self.fail(message, context, code=codes.ATTR_DEFINED)
self.add_unknown_imported_symbol(
imported_id, context, target_name=None, module_public=module_public,
module_hidden=not module_public
)
if import_id == 'typing':
# The user probably has a missing definition in a test fixture. Let's verify.
fullname = 'builtins.{}'.format(source_id.lower())
if (self.lookup_fully_qualified_or_none(fullname) is None and
fullname in SUGGESTED_TEST_FIXTURES):
# Yes. Generate a helpful note.
self.msg.add_fixture_note(fullname, context)
def process_import_over_existing_name(self,
imported_id: str, existing_symbol: SymbolTableNode,
module_symbol: SymbolTableNode,
import_node: ImportBase) -> bool:
if existing_symbol.node is module_symbol.node:
# We added this symbol on previous iteration.
return False
if (existing_symbol.kind in (LDEF, GDEF, MDEF) and
isinstance(existing_symbol.node, (Var, FuncDef, TypeInfo, Decorator, TypeAlias))):
# This is a valid import over an existing definition in the file. Construct a dummy
# assignment that we'll use to type check the import.
lvalue = NameExpr(imported_id)
lvalue.kind = existing_symbol.kind
lvalue.node = existing_symbol.node
rvalue = NameExpr(imported_id)
rvalue.kind = module_symbol.kind
rvalue.node = module_symbol.node
if isinstance(rvalue.node, TypeAlias):
# Suppress bogus errors from the dummy assignment if rvalue is an alias.
# Otherwise mypy may complain that alias is invalid in runtime context.
rvalue.is_alias_rvalue = True
assignment = AssignmentStmt([lvalue], rvalue)
for node in assignment, lvalue, rvalue:
node.set_line(import_node)
import_node.assignments.append(assignment)
return True
return False
def correct_relative_import(self, node: Union[ImportFrom, ImportAll]) -> str:
import_id, ok = correct_relative_import(self.cur_mod_id, node.relative, node.id,
self.cur_mod_node.is_package_init_file())
if not ok:
self.fail("Relative import climbs too many namespaces", node)
return import_id
    def visit_import_all(self, i: ImportAll) -> None:
        """Analyze a 'from m import *' statement."""
        i_id = self.correct_relative_import(i)
        if i_id in self.modules:
            m = self.modules[i_id]
            if self.is_incomplete_namespace(i_id):
                # Any names could be missing from the current namespace if the target module
                # namespace is incomplete.
                self.mark_incomplete('*', i)
            for name, node in m.names.items():
                fullname = i_id + '.' + name
                self.set_future_import_flags(fullname)
                if node is None:
                    continue
                # if '__all__' exists, all nodes not included have had module_public set to
                # False, and we can skip checking '_' because it's been explicitly included.
                if node.module_public and (not name.startswith('_') or '__all__' in m.names):
                    if isinstance(node.node, MypyFile):
                        # Star import of submodule from a package, add it as a dependency.
                        self.imports.add(node.node.fullname)
                    existing_symbol = self.lookup_current_scope(name)
                    if existing_symbol and not isinstance(node.node, PlaceholderNode):
                        # Import can redefine a variable. They get special treatment.
                        if self.process_import_over_existing_name(
                                name, existing_symbol, node, i):
                            continue
                    # In stub files, `from x import *` always reexports the symbols.
                    # In regular files, only if implicit reexports are enabled.
                    module_public = self.is_stub_file or self.options.implicit_reexport
                    self.add_imported_symbol(name, node, i,
                                             module_public=module_public,
                                             module_hidden=not module_public)
        else:
            # Don't add any dummy symbols for 'from x import *' if 'x' is unknown.
            pass
#
# Assignment
#
    def visit_assignment_expr(self, s: AssignmentExpr) -> None:
        """Analyze a walrus (:=) expression: the value first, then the target.

        The target escapes an enclosing comprehension scope, matching PEP 572
        runtime scoping.
        """
        s.value.accept(self)
        self.analyze_lvalue(s.target, escape_comprehensions=True)
    def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
        """Analyze an assignment, dispatching special forms before plain assignments.

        The order of the special-form checks below is significant: each one
        may consume the statement and prevent the later checks from running.
        """
        self.statement = s
        # Special case assignment like X = X.
        if self.analyze_identity_global_assignment(s):
            return
        tag = self.track_incomplete_refs()
        s.rvalue.accept(self)
        if self.found_incomplete_ref(tag) or self.should_wait_rhs(s.rvalue):
            # Initializer couldn't be fully analyzed. Defer the current node and give up.
            # Make sure that if we skip the definition of some local names, they can't be
            # added later in this scope, since an earlier definition should take precedence.
            for expr in names_modified_by_assignment(s):
                self.mark_incomplete(expr.name, expr)
            return
        # The r.h.s. is now ready to be classified, first check if it is a special form:
        special_form = False
        # * type alias
        if self.check_and_set_up_type_alias(s):
            s.is_alias_def = True
            special_form = True
        # * type variable definition
        elif self.process_typevar_declaration(s):
            special_form = True
        elif self.process_paramspec_declaration(s):
            special_form = True
        # * type constructors
        elif self.analyze_namedtuple_assign(s):
            special_form = True
        elif self.analyze_typeddict_assign(s):
            special_form = True
        elif self.newtype_analyzer.process_newtype_declaration(s):
            special_form = True
        elif self.analyze_enum_assign(s):
            special_form = True
        if special_form:
            self.record_special_form_lvalue(s)
            return
        # OK, this is a regular assignment, perform the necessary analysis steps.
        s.is_final_def = self.unwrap_final(s)
        self.analyze_lvalues(s)
        self.check_final_implicit_def(s)
        self.check_classvar(s)
        self.process_type_annotation(s)
        self.apply_dynamic_class_hook(s)
        self.store_final_status(s)
        if not s.type:
            self.process_module_assignment(s.lvalues, s.rvalue, s)
        self.process__all__(s)
        self.process__deletable__(s)
def analyze_identity_global_assignment(self, s: AssignmentStmt) -> bool:
"""Special case 'X = X' in global scope.
This allows supporting some important use cases.
Return true if special casing was applied.
"""
if not isinstance(s.rvalue, NameExpr) or len(s.lvalues) != 1:
# Not of form 'X = X'
return False
lvalue = s.lvalues[0]
if not isinstance(lvalue, NameExpr) or s.rvalue.name != lvalue.name:
# Not of form 'X = X'
return False
if self.type is not None or self.is_func_scope():
# Not in global scope
return False
# It's an assignment like 'X = X' in the global scope.
name = lvalue.name
sym = self.lookup(name, s)
if sym is None:
if self.final_iteration:
# Fall back to normal assignment analysis.
return False
else:
self.defer()
return True
else:
if sym.node is None:
# Something special -- fall back to normal assignment analysis.
return False
if name not in self.globals:
# The name is from builtins. Add an alias to the current module.
self.add_symbol(name, sym.node, s)
if not isinstance(sym.node, PlaceholderNode):
for node in s.rvalue, lvalue:
node.node = sym.node
node.kind = GDEF
node.fullname = sym.node.fullname
return True
    def should_wait_rhs(self, rv: Expression) -> bool:
        """Can we already classify this r.h.s. of an assignment or should we wait?

        This returns True if we don't have enough information to decide whether
        an assignment is just a normal variable definition or a special form.
        Always return False if this is a final iteration. This will typically cause
        the lvalue to be classified as a variable plus emit an error.
        """
        if self.final_iteration:
            # No chance, nothing has changed.
            return False
        if isinstance(rv, NameExpr):
            # Wait if the name currently resolves to a placeholder that is not
            # known to become a TypeInfo.
            n = self.lookup(rv.name, rv)
            if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
                return True
        elif isinstance(rv, MemberExpr):
            fname = get_member_expr_fullname(rv)
            if fname:
                n = self.lookup_qualified(fname, rv, suppress_errors=True)
                if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
                    return True
        elif isinstance(rv, IndexExpr) and isinstance(rv.base, RefExpr):
            # For subscripted expressions the decision depends on the base only.
            return self.should_wait_rhs(rv.base)
        elif isinstance(rv, CallExpr) and isinstance(rv.callee, RefExpr):
            # This is only relevant for builtin SCC where things like 'TypeVar'
            # may be not ready.
            return self.should_wait_rhs(rv.callee)
        return False
    def can_be_type_alias(self, rv: Expression, allow_none: bool = False) -> bool:
        """Is this a valid r.h.s. for an alias definition?

        Note: this function should be only called for expressions where self.should_wait_rhs()
        returns False.
        """
        if isinstance(rv, RefExpr) and self.is_type_ref(rv, bare=True):
            return True
        if isinstance(rv, IndexExpr) and self.is_type_ref(rv.base, bare=False):
            return True
        if self.is_none_alias(rv):
            return True
        if allow_none and isinstance(rv, NameExpr) and rv.fullname == 'builtins.None':
            return True
        # PEP 604-style union syntax: X | Y is an alias r.h.s. if both sides are.
        if (isinstance(rv, OpExpr)
                and rv.op == '|'
                and self.can_be_type_alias(rv.left, allow_none=True)
                and self.can_be_type_alias(rv.right, allow_none=True)):
            return True
        return False
    def is_type_ref(self, rv: Expression, bare: bool = False) -> bool:
        """Does this expression refer to a type?

        This includes:
          * Special forms, like Any or Union
          * Classes (except subscripted enums)
          * Other type aliases
          * PlaceholderNodes with becomes_typeinfo=True (these can be not ready class
            definitions, and not ready aliases).

        If bare is True, this is not a base of an index expression, so some special
        forms are not valid (like a bare Union).

        Note: This method should be only used in context of a type alias definition.
        This method can only return True for RefExprs, to check if C[int] is a valid
        target for type alias call this method on expr.base (i.e. on C in C[int]).
        See also can_be_type_alias().
        """
        if not isinstance(rv, RefExpr):
            return False
        if isinstance(rv.node, TypeVarExpr):
            # A type variable is never a valid alias target.
            self.fail('Type variable "{}" is invalid as target for type alias'.format(
                rv.fullname), rv)
            return False
        if bare:
            # These three are valid even if bare, for example
            # A = Tuple is just equivalent to A = Tuple[Any, ...].
            valid_refs = {'typing.Any', 'typing.Tuple', 'typing.Callable'}
        else:
            valid_refs = type_constructors
        if isinstance(rv.node, TypeAlias) or rv.fullname in valid_refs:
            return True
        if isinstance(rv.node, TypeInfo):
            if bare:
                return True
            # Assignment color = Color['RED'] defines a variable, not an alias.
            return not rv.node.is_enum
        if isinstance(rv, NameExpr):
            # A placeholder that will become a class also counts as a type ref.
            n = self.lookup(rv.name, rv)
            if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
                return True
        elif isinstance(rv, MemberExpr):
            fname = get_member_expr_fullname(rv)
            if fname:
                # The r.h.s. for variable definitions may not be a type reference but just
                # an instance attribute, so suppress the errors.
                n = self.lookup_qualified(fname, rv, suppress_errors=True)
                if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
                    return True
        return False
def is_none_alias(self, node: Expression) -> bool:
"""Is this a r.h.s. for a None alias?
We special case the assignments like Void = type(None), to allow using
Void in type annotations.
"""
if isinstance(node, CallExpr):
if (isinstance(node.callee, NameExpr) and len(node.args) == 1 and
isinstance(node.args[0], NameExpr)):
call = self.lookup_qualified(node.callee.name, node.callee)
arg = self.lookup_qualified(node.args[0].name, node.args[0])
if (call is not None and call.node and call.node.fullname == 'builtins.type' and
arg is not None and arg.node and arg.node.fullname == 'builtins.None'):
return True
return False
    def record_special_form_lvalue(self, s: AssignmentStmt) -> None:
        """Record minimal necessary information about l.h.s. of a special form.

        This exists mostly for compatibility with the old semantic analyzer.
        """
        lvalue = s.lvalues[0]
        assert isinstance(lvalue, NameExpr)
        lvalue.is_special_form = True
        if self.current_symbol_kind() == GDEF:
            # Only globals get a qualified full name.
            lvalue.fullname = self.qualified_name(lvalue.name)
        lvalue.kind = self.current_symbol_kind()
def analyze_enum_assign(self, s: AssignmentStmt) -> bool:
"""Check if s defines an Enum."""
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, EnumCallExpr):
# Already analyzed enum -- nothing to do here.
return True
return self.enum_call_analyzer.process_enum_call(s, self.is_func_scope())
    def analyze_namedtuple_assign(self, s: AssignmentStmt) -> bool:
        """Check if s defines a namedtuple."""
        if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, NamedTupleExpr):
            return True  # This is a valid and analyzed named tuple definition, nothing to do here.
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)):
            # Only a single simple-name (or attribute) target can define a namedtuple.
            return False
        lvalue = s.lvalues[0]
        name = lvalue.name
        internal_name, info = self.named_tuple_analyzer.check_namedtuple(s.rvalue, name,
                                                                         self.is_func_scope())
        if internal_name is None:
            # Not a namedtuple call at all.
            return False
        if isinstance(lvalue, MemberExpr):
            self.fail("NamedTuple type as an attribute is not supported", lvalue)
            return False
        if internal_name != name:
            # The name passed to namedtuple() must match the assignment target.
            self.fail('First argument to namedtuple() should be "{}", not "{}"'.format(
                name, internal_name), s.rvalue, code=codes.NAME_MATCH)
            return True
        # Yes, it's a valid namedtuple, but defer if it is not ready.
        if not info:
            self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
        return True
    def analyze_typeddict_assign(self, s: AssignmentStmt) -> bool:
        """Check if s defines a typed dict."""
        if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, TypedDictExpr):
            return True  # This is a valid and analyzed typed dict definition, nothing to do here.
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)):
            # Only a single simple-name (or attribute) target can define a TypedDict.
            return False
        lvalue = s.lvalues[0]
        name = lvalue.name
        is_typed_dict, info = self.typed_dict_analyzer.check_typeddict(s.rvalue, name,
                                                                       self.is_func_scope())
        if not is_typed_dict:
            return False
        if isinstance(lvalue, MemberExpr):
            self.fail("TypedDict type as attribute is not supported", lvalue)
            return False
        # Yes, it's a valid typed dict, but defer if it is not ready.
        if not info:
            self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
        return True
    def analyze_lvalues(self, s: AssignmentStmt) -> None:
        """Analyze all assignment targets of s, propagating annotation info."""
        # We cannot use s.type, because analyze_simple_literal_type() will set it.
        explicit = s.unanalyzed_type is not None
        if self.is_final_type(s.unanalyzed_type):
            # We need to exclude bare Final.
            assert isinstance(s.unanalyzed_type, UnboundType)
            if not s.unanalyzed_type.args:
                explicit = False
        for lval in s.lvalues:
            self.analyze_lvalue(lval,
                                explicit_type=explicit,
                                is_final=s.is_final_def)
def apply_dynamic_class_hook(self, s: AssignmentStmt) -> None:
if len(s.lvalues) > 1:
return
lval = s.lvalues[0]
if not isinstance(lval, NameExpr) or not isinstance(s.rvalue, CallExpr):
return
call = s.rvalue
fname = None
if isinstance(call.callee, RefExpr):
fname = call.callee.fullname
# check if method call
if fname is None and isinstance(call.callee, MemberExpr):
callee_expr = call.callee.expr
if isinstance(callee_expr, RefExpr) and callee_expr.fullname:
method_name = call.callee.name
fname = callee_expr.fullname + '.' + method_name
if fname:
hook = self.plugin.get_dynamic_class_hook(fname)
if hook:
hook(DynamicClassDefContext(call, lval.name, self))
    def unwrap_final(self, s: AssignmentStmt) -> bool:
        """Strip Final[...] if present in an assignment.

        This is done to invoke type inference during type checking phase for this
        assignment. Also, Final[...] doesn't affect type in any way -- it is rather an
        access qualifier for given `Var`.

        Also perform various consistency checks.

        Returns True if Final[...] was present.
        """
        if not s.unanalyzed_type or not self.is_final_type(s.unanalyzed_type):
            return False
        assert isinstance(s.unanalyzed_type, UnboundType)
        if len(s.unanalyzed_type.args) > 1:
            self.fail("Final[...] takes at most one type argument", s.unanalyzed_type)
        invalid_bare_final = False
        if not s.unanalyzed_type.args:
            # Bare Final: the declared type must come from the initializer.
            s.type = None
            if isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs:
                invalid_bare_final = True
                self.fail("Type in Final[...] can only be omitted if there is an initializer", s)
        else:
            s.type = s.unanalyzed_type.args[0]
        if s.type is not None and self.is_classvar(s.type):
            self.fail("Variable should not be annotated with both ClassVar and Final", s)
            return False
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], RefExpr):
            self.fail("Invalid final declaration", s)
            return False
        lval = s.lvalues[0]
        assert isinstance(lval, RefExpr)
        # Reset inferred status if it was set due to simple literal rvalue on previous iteration.
        # TODO: this is a best-effort quick fix, we should avoid the need to manually sync this,
        # see https://github.com/python/mypy/issues/6458.
        if lval.is_new_def:
            lval.is_inferred_def = s.type is None
        if self.loop_depth > 0:
            self.fail("Cannot use Final inside a loop", s)
        if self.type and self.type.is_protocol:
            self.msg.protocol_members_cant_be_final(s)
        if (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs and
                not self.is_stub_file and not self.is_class_scope()):
            if not invalid_bare_final:  # Skip extra error messages.
                self.msg.final_without_value(s)
        return True
    def check_final_implicit_def(self, s: AssignmentStmt) -> None:
        """Do basic checks for final declaration on self in __init__.

        Additional re-definition checks are performed by `analyze_lvalue`.
        """
        if not s.is_final_def:
            return
        lval = s.lvalues[0]
        assert isinstance(lval, RefExpr)
        if isinstance(lval, MemberExpr):
            # Attribute target: only `self.x` is allowed.
            if not self.is_self_member_ref(lval):
                self.fail("Final can be only applied to a name or an attribute on self", s)
                s.is_final_def = False
            return
        else:
            # Name target inside a function: only allowed in __init__.
            assert self.function_stack
            if self.function_stack[-1].name != '__init__':
                self.fail("Can only declare a final attribute in class body or __init__", s)
                s.is_final_def = False
                return
    def store_final_status(self, s: AssignmentStmt) -> None:
        """If this is a locally valid final declaration, set the corresponding flag on `Var`."""
        if s.is_final_def:
            if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
                node = s.lvalues[0].node
                if isinstance(node, Var):
                    node.is_final = True
                    node.final_value = self.unbox_literal(s.rvalue)
                    # A bare Final in a class body (no r.h.s.) means the value
                    # must be supplied later, e.g. in __init__.
                    if (self.is_class_scope() and
                            (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
                        node.final_unset_in_class = True
        else:
            # Special case: deferred initialization of a final attribute in __init__.
            # In this case we just pretend this is a valid final definition to suppress
            # errors about assigning to final attribute.
            for lval in self.flatten_lvalues(s.lvalues):
                if isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
                    assert self.type, "Self member outside a class"
                    cur_node = self.type.names.get(lval.name, None)
                    if cur_node and isinstance(cur_node.node, Var) and cur_node.node.is_final:
                        assert self.function_stack
                        top_function = self.function_stack[-1]
                        if (top_function.name == '__init__' and
                                cur_node.node.final_unset_in_class and
                                not cur_node.node.final_set_in_init and
                                not (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
                            cur_node.node.final_set_in_init = True
                            s.is_final_def = True
def flatten_lvalues(self, lvalues: List[Expression]) -> List[Expression]:
res: List[Expression] = []
for lv in lvalues:
if isinstance(lv, (TupleExpr, ListExpr)):
res.extend(self.flatten_lvalues(lv.items))
else:
res.append(lv)
return res
def unbox_literal(self, e: Expression) -> Optional[Union[int, float, bool, str]]:
if isinstance(e, (IntExpr, FloatExpr, StrExpr)):
return e.value
elif isinstance(e, NameExpr) and e.name in ('True', 'False'):
return True if e.name == 'True' else False
return None
    def process_type_annotation(self, s: AssignmentStmt) -> None:
        """Analyze type annotation or infer simple literal type."""
        if s.type:
            lvalue = s.lvalues[-1]
            allow_tuple_literal = isinstance(lvalue, TupleExpr)
            analyzed = self.anal_type(s.type, allow_tuple_literal=allow_tuple_literal)
            # Don't store not ready types (including placeholders).
            if analyzed is None or has_placeholder(analyzed):
                return
            s.type = analyzed
            if (self.type and self.type.is_protocol and isinstance(lvalue, NameExpr) and
                    isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs):
                # Annotated protocol member without a value is an abstract attribute.
                if isinstance(lvalue.node, Var):
                    lvalue.node.is_abstract_var = True
        else:
            if (self.type and self.type.is_protocol and
                    self.is_annotated_protocol_member(s) and not self.is_func_scope()):
                self.fail('All protocol members must have explicitly declared types', s)
            # Set the type if the rvalue is a simple literal (even if the above error occurred).
            if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
                if s.lvalues[0].is_inferred_def:
                    s.type = self.analyze_simple_literal_type(s.rvalue, s.is_final_def)
        if s.type:
            # Store type into nodes.
            for lvalue in s.lvalues:
                self.store_declared_types(lvalue, s.type)
def is_annotated_protocol_member(self, s: AssignmentStmt) -> bool:
"""Check whether a protocol member is annotated.
There are some exceptions that can be left unannotated, like ``__slots__``."""
return any(
(
isinstance(lv, NameExpr)
and lv.name != '__slots__'
and lv.is_inferred_def
)
for lv in s.lvalues
)
    def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> Optional[Type]:
        """Return builtins.int if rvalue is an int literal, etc.

        If this is a 'Final' context, we return "Literal[...]" instead.
        """
        if self.options.semantic_analysis_only or self.function_stack:
            # Skip this if we're only doing the semantic analysis pass.
            # This is mostly to avoid breaking unit tests.
            # Also skip inside a function; this is to avoid confusing
            # the code that handles dead code due to isinstance()
            # inside type variables with value restrictions (like
            # AnyStr).
            return None
        if isinstance(rvalue, FloatExpr):
            # Floats never get a Literal type, just plain float.
            return self.named_type_or_none('builtins.float')
        value: Optional[LiteralValue] = None
        type_name: Optional[str] = None
        if isinstance(rvalue, IntExpr):
            value, type_name = rvalue.value, 'builtins.int'
        if isinstance(rvalue, StrExpr):
            value, type_name = rvalue.value, 'builtins.str'
        if isinstance(rvalue, BytesExpr):
            value, type_name = rvalue.value, 'builtins.bytes'
        if isinstance(rvalue, UnicodeExpr):
            value, type_name = rvalue.value, 'builtins.unicode'
        if type_name is not None:
            assert value is not None
            typ = self.named_type_or_none(type_name)
            if typ and is_final:
                # In a Final context, remember the exact literal value.
                return typ.copy_modified(last_known_value=LiteralType(
                    value=value,
                    fallback=typ,
                    line=typ.line,
                    column=typ.column,
                ))
            return typ
        return None
    def analyze_alias(self, rvalue: Expression,
                      allow_placeholder: bool = False) -> Tuple[Optional[Type], List[str],
                                                                Set[str], List[str]]:
        """Check if 'rvalue' is a valid type allowed for aliasing (e.g. not a type variable).

        If yes, return the corresponding type, a list of
        qualified type variable names for generic aliases, a set of names the alias depends on,
        and a list of type variables if the alias is generic.

        An schematic example for the dependencies:
            A = int
            B = str
            analyze_alias(Dict[A, B])[2] == {'__main__.A', '__main__.B'}
        """
        dynamic = bool(self.function_stack and self.function_stack[-1].is_dynamic())
        global_scope = not self.type and not self.function_stack
        res = analyze_type_alias(rvalue,
                                 self,
                                 self.tvar_scope,
                                 self.plugin,
                                 self.options,
                                 self.is_typeshed_stub_file,
                                 allow_new_syntax=self.is_stub_file,
                                 allow_placeholder=allow_placeholder,
                                 in_dynamic_func=dynamic,
                                 global_scope=global_scope)
        typ: Optional[Type] = None
        if res:
            typ, depends_on = res
            # Collect type variables appearing in the alias target.
            found_type_vars = typ.accept(TypeVarLikeQuery(self.lookup_qualified, self.tvar_scope))
            alias_tvars = [name for (name, node) in found_type_vars]
            qualified_tvars = [node.fullname for (name, node) in found_type_vars]
        else:
            alias_tvars = []
            depends_on = set()
            qualified_tvars = []
        return typ, alias_tvars, depends_on, qualified_tvars
    def check_and_set_up_type_alias(self, s: AssignmentStmt) -> bool:
        """Check if assignment creates a type alias and set it up as needed.

        Return True if it is a type alias (even if the target is not ready),
        or False otherwise.

        Note: the resulting types for subscripted (including generic) aliases
        are also stored in rvalue.analyzed.
        """
        lvalue = s.lvalues[0]
        if len(s.lvalues) > 1 or not isinstance(lvalue, NameExpr):
            # First rule: Only simple assignments like Alias = ... create aliases.
            return False
        if s.unanalyzed_type is not None:
            # Second rule: Explicit type (cls: Type[A] = A) always creates variable, not alias.
            return False
        existing = self.current_symbol_table().get(lvalue.name)
        # Third rule: type aliases can't be re-defined. For example:
        #     A: Type[float] = int
        #     A = float  # OK, but this doesn't define an alias
        #     B = int
        #     B = float  # Error!
        # Don't create an alias in these cases:
        if (existing
                and (isinstance(existing.node, Var)  # existing variable
                     or (isinstance(existing.node, TypeAlias)
                         and not s.is_alias_def)  # existing alias
                     or (isinstance(existing.node, PlaceholderNode)
                         and existing.node.node.line < s.line))):  # previous incomplete definition
            # TODO: find a more robust way to track the order of definitions.
            # Note: if is_alias_def=True, this is just a node from previous iteration.
            if isinstance(existing.node, TypeAlias) and not s.is_alias_def:
                self.fail('Cannot assign multiple types to name "{}"'
                          ' without an explicit "Type[...]" annotation'
                          .format(lvalue.name), lvalue)
            return False
        non_global_scope = self.type or self.is_func_scope()
        if isinstance(s.rvalue, RefExpr) and non_global_scope:
            # Fourth rule (special case): Non-subscripted right hand side creates a variable
            # at class and function scopes. For example:
            #
            #   class Model:
            #       ...
            #   class C:
            #       model = Model # this is automatically a variable with type 'Type[Model]'
            #
            # without this rule, this typical use case will require a lot of explicit
            # annotations (see the second rule).
            return False
        rvalue = s.rvalue
        if not self.can_be_type_alias(rvalue):
            return False
        if existing and not isinstance(existing.node, (PlaceholderNode, TypeAlias)):
            # Cannot redefine existing node as type alias.
            return False
        res: Optional[Type] = None
        if self.is_none_alias(rvalue):
            # Special form Void = type(None).
            res = NoneType()
            alias_tvars, depends_on, qualified_tvars = \
                [], set(), []  # type: List[str], Set[str], List[str]
        else:
            tag = self.track_incomplete_refs()
            res, alias_tvars, depends_on, qualified_tvars = \
                self.analyze_alias(rvalue, allow_placeholder=True)
            if not res:
                return False
            # TODO: Maybe we only need to reject top-level placeholders, similar
            #       to base classes.
            if self.found_incomplete_ref(tag) or has_placeholder(res):
                # Since we have got here, we know this must be a type alias (incomplete refs
                # may appear in nested positions), therefore use becomes_typeinfo=True.
                self.mark_incomplete(lvalue.name, rvalue, becomes_typeinfo=True)
                return True
        self.add_type_alias_deps(depends_on)
        # In addition to the aliases used, we add deps on unbound
        # type variables, since they are erased from target type.
        self.add_type_alias_deps(qualified_tvars)
        # The above are only direct deps on other aliases.
        # For subscripted aliases, type deps from expansion are added in deps.py
        # (because the type is stored).
        check_for_explicit_any(res, self.options, self.is_typeshed_stub_file, self.msg,
                               context=s)
        # When this type alias gets "inlined", the Any is not explicit anymore,
        # so we need to replace it with non-explicit Anys.
        if not has_placeholder(res):
            res = make_any_non_explicit(res)
        # Note: with the new (lazy) type alias representation we only need to set no_args to True
        # if the expected number of arguments is non-zero, so that aliases like A = List work.
        # However, eagerly expanding aliases like Text = str is a nice performance optimization.
        no_args = isinstance(res, Instance) and not res.args  # type: ignore[misc]
        fix_instance_types(res, self.fail, self.note, self.options.python_version)
        # Aliases defined within functions can't be accessed outside
        # the function, since the symbol table will no longer
        # exist. Work around by expanding them eagerly when used.
        eager = self.is_func_scope()
        alias_node = TypeAlias(res,
                               self.qualified_name(lvalue.name),
                               s.line,
                               s.column,
                               alias_tvars=alias_tvars,
                               no_args=no_args,
                               eager=eager)
        if isinstance(s.rvalue, (IndexExpr, CallExpr)):  # CallExpr is for `void = type(None)`
            s.rvalue.analyzed = TypeAliasExpr(alias_node)
            s.rvalue.analyzed.line = s.line
            # we use the column from resulting target, to get better location for errors
            s.rvalue.analyzed.column = res.column
        elif isinstance(s.rvalue, RefExpr):
            s.rvalue.is_alias_rvalue = True
        if existing:
            # An alias gets updated.
            updated = False
            if isinstance(existing.node, TypeAlias):
                if existing.node.target != res:
                    # Copy expansion to the existing alias, this matches how we update base classes
                    # for a TypeInfo _in place_ if there are nested placeholders.
                    existing.node.target = res
                    existing.node.alias_tvars = alias_tvars
                    existing.node.no_args = no_args
                    updated = True
            else:
                # Otherwise just replace existing placeholder with type alias.
                existing.node = alias_node
                updated = True
            if updated:
                if self.final_iteration:
                    self.cannot_resolve_name(lvalue.name, 'name', s)
                    return True
                else:
                    self.progress = True
                    # We need to defer so that this change can get propagated to base classes.
                    self.defer(s)
        else:
            self.add_symbol(lvalue.name, alias_node, s)
        if isinstance(rvalue, RefExpr) and isinstance(rvalue.node, TypeAlias):
            alias_node.normalized = rvalue.node.normalized
        return True
    def analyze_lvalue(self,
                       lval: Lvalue,
                       nested: bool = False,
                       explicit_type: bool = False,
                       is_final: bool = False,
                       escape_comprehensions: bool = False) -> None:
        """Analyze an lvalue or assignment target.

        Args:
            lval: The target lvalue
            nested: If true, the lvalue is within a tuple or list lvalue expression
            explicit_type: Assignment has type annotation
            escape_comprehensions: If we are inside a comprehension, set the variable
                in the enclosing scope instead. This implements
                https://www.python.org/dev/peps/pep-0572/#scope-of-the-target
        """
        if escape_comprehensions:
            assert isinstance(lval, NameExpr), "assignment expression target must be NameExpr"
        # Dispatch on the syntactic form of the target.
        if isinstance(lval, NameExpr):
            self.analyze_name_lvalue(lval, explicit_type, is_final, escape_comprehensions)
        elif isinstance(lval, MemberExpr):
            self.analyze_member_lvalue(lval, explicit_type, is_final)
            if explicit_type and not self.is_self_member_ref(lval):
                self.fail('Type cannot be declared in assignment to non-self '
                          'attribute', lval)
        elif isinstance(lval, IndexExpr):
            if explicit_type:
                self.fail('Unexpected type declaration', lval)
            lval.accept(self)
        elif isinstance(lval, TupleExpr):
            self.analyze_tuple_or_list_lvalue(lval, explicit_type)
        elif isinstance(lval, StarExpr):
            # Starred targets are only legal nested inside a tuple/list target.
            if nested:
                self.analyze_lvalue(lval.expr, nested, explicit_type)
            else:
                self.fail('Starred assignment target must be in a list or tuple', lval)
        else:
            self.fail('Invalid assignment target', lval)
    def analyze_name_lvalue(self,
                            lvalue: NameExpr,
                            explicit_type: bool,
                            is_final: bool,
                            escape_comprehensions: bool) -> None:
        """Analyze an lvalue that targets a name expression.

        Arguments are similar to "analyze_lvalue".
        """
        if lvalue.node:
            # This has been bound already in a previous iteration.
            return
        name = lvalue.name
        if self.is_alias_for_final_name(name):
            if is_final:
                self.fail("Cannot redefine an existing name as final", lvalue)
            else:
                self.msg.cant_assign_to_final(name, self.type is not None, lvalue)
        kind = self.current_symbol_kind()
        names = self.current_symbol_table()
        existing = names.get(name)
        outer = self.is_global_or_nonlocal(name)
        if (not existing or isinstance(existing.node, PlaceholderNode)) and not outer:
            # Define new variable.
            var = self.make_name_lvalue_var(lvalue, kind, not explicit_type)
            added = self.add_symbol(name, var, lvalue, escape_comprehensions=escape_comprehensions)
            # Only bind expression if we successfully added name to symbol table.
            if added:
                lvalue.is_new_def = True
                lvalue.is_inferred_def = True
                lvalue.kind = kind
                lvalue.node = var
                if kind == GDEF:
                    lvalue.fullname = var._fullname
                else:
                    lvalue.fullname = lvalue.name
                if self.is_func_scope():
                    if unmangle(name) == '_':
                        # Special case for assignment to local named '_': always infer 'Any'.
                        typ = AnyType(TypeOfAny.special_form)
                        self.store_declared_types(lvalue, typ)
            if is_final and self.is_final_redefinition(kind, name):
                self.fail("Cannot redefine an existing name as final", lvalue)
        else:
            # The name already exists (or is global/nonlocal): rebind to it.
            self.make_name_lvalue_point_to_existing_def(lvalue, explicit_type, is_final)
def is_final_redefinition(self, kind: int, name: str) -> bool:
if kind == GDEF:
return self.is_mangled_global(name) and not self.is_initial_mangled_global(name)
elif kind == MDEF and self.type:
return unmangle(name) + "'" in self.type.names
return False
    def is_alias_for_final_name(self, name: str) -> bool:
        """Check if `name` is a mangled alias of a Final name in the current scope."""
        if self.is_func_scope():
            if not name.endswith("'"):
                # Not a mangled name -- can't be an alias
                return False
            name = unmangle(name)
            assert self.locals[-1] is not None, "No locals at function scope"
            existing = self.locals[-1].get(name)
            return existing is not None and is_final_node(existing.node)
        elif self.type is not None:
            # Class scope: the original definition is stored under the primed name.
            orig_name = unmangle(name) + "'"
            if name == orig_name:
                return False
            existing = self.type.names.get(orig_name)
            return existing is not None and is_final_node(existing.node)
        else:
            # Global scope.
            orig_name = unmangle(name) + "'"
            if name == orig_name:
                return False
            existing = self.globals.get(orig_name)
            return existing is not None and is_final_node(existing.node)
    def make_name_lvalue_var(self, lvalue: NameExpr, kind: int, inferred: bool) -> Var:
        """Return a Var node for an lvalue that is a name expression."""
        v = Var(lvalue.name)
        v.set_line(lvalue)
        v.is_inferred = inferred
        if kind == MDEF:
            assert self.type is not None
            v.info = self.type
            v.is_initialized_in_class = True
        if kind != LDEF:
            v._fullname = self.qualified_name(lvalue.name)
        else:
            # fullname should never stay None
            v._fullname = lvalue.name
        v.is_ready = False  # Type not inferred yet
        return v
    def make_name_lvalue_point_to_existing_def(
            self,
            lval: NameExpr,
            explicit_type: bool,
            is_final: bool) -> None:
        """Update an lvalue to point to existing definition in the same scope.

        Arguments are similar to "analyze_lvalue".

        Assume that an existing name exists.
        """
        if is_final:
            # Redefining an existing name with final is always an error.
            self.fail("Cannot redefine an existing name as final", lval)
        original_def = self.lookup(lval.name, lval, suppress_errors=True)
        if original_def is None and self.type and not self.is_func_scope():
            # Workaround to allow "x, x = ..." in class body.
            original_def = self.type.get(lval.name)
        if explicit_type:
            # Don't re-bind if there is a type annotation.
            self.name_already_defined(lval.name, lval, original_def)
        else:
            # Bind to an existing name.
            if original_def:
                self.bind_name_expr(lval, original_def)
            else:
                self.name_not_defined(lval.name, lval)
            self.check_lvalue_validity(lval.node, lval)
def analyze_tuple_or_list_lvalue(self, lval: TupleExpr,
explicit_type: bool = False) -> None:
"""Analyze an lvalue or assignment target that is a list or tuple."""
items = lval.items
star_exprs = [item for item in items if isinstance(item, StarExpr)]
if len(star_exprs) > 1:
self.fail('Two starred expressions in assignment', lval)
else:
if len(star_exprs) == 1:
star_exprs[0].valid = True
for i in items:
self.analyze_lvalue(i, nested=True, explicit_type=explicit_type)
    def analyze_member_lvalue(self, lval: MemberExpr, explicit_type: bool, is_final: bool) -> None:
        """Analyze lvalue that is a member expression.

        Arguments:
            lval: The target lvalue
            explicit_type: Assignment has type annotation
            is_final: Is the target final
        """
        if lval.node:
            # This has been bound already in a previous iteration.
            return
        lval.accept(self)
        if self.is_self_member_ref(lval):
            assert self.type, "Self member outside a class"
            cur_node = self.type.names.get(lval.name)
            node = self.type.get(lval.name)
            if cur_node and is_final:
                # Overrides will be checked in type checker.
                self.fail("Cannot redefine an existing name as final", lval)
            # On first encounter with this definition, if this attribute was defined before
            # with an inferred type and it's marked with an explicit type now, give an error.
            if (not lval.node and cur_node and isinstance(cur_node.node, Var) and
                    cur_node.node.is_inferred and explicit_type):
                self.attribute_already_defined(lval.name, lval, cur_node)
            # If the attribute of self is not defined in superclasses, create a new Var, ...
            if (node is None
                    or (isinstance(node.node, Var) and node.node.is_abstract_var)
                    # ... also an explicit declaration on self also creates a new Var.
                    # Note that `explicit_type` might has been erased for bare `Final`,
                    # so we also check if `is_final` is passed.
                    or (cur_node is None and (explicit_type or is_final))):
                if self.type.is_protocol and node is None:
                    self.fail("Protocol members cannot be defined via assignment to self", lval)
                else:
                    # Implicit attribute definition in __init__.
                    lval.is_new_def = True
                    lval.is_inferred_def = True
                    v = Var(lval.name)
                    v.set_line(lval)
                    v._fullname = self.qualified_name(lval.name)
                    v.info = self.type
                    v.is_ready = False
                    v.explicit_self_type = explicit_type or is_final
                    lval.def_var = v
                    lval.node = v
                    # TODO: should we also set lval.kind = MDEF?
                    self.type.names[lval.name] = SymbolTableNode(MDEF, v, implicit=True)
        self.check_lvalue_validity(lval.node, lval)
    def is_self_member_ref(self, memberexpr: MemberExpr) -> bool:
        """Does memberexpr refer to an attribute of self?"""
        if not isinstance(memberexpr.expr, NameExpr):
            return False
        node = memberexpr.expr.node
        # The base must be a name bound to a Var flagged as 'self'.
        return isinstance(node, Var) and node.is_self
def check_lvalue_validity(self, node: Union[Expression, SymbolNode, None],
ctx: Context) -> None:
if isinstance(node, TypeVarExpr):
self.fail('Invalid assignment target', ctx)
elif isinstance(node, TypeInfo):
self.fail(message_registry.CANNOT_ASSIGN_TO_TYPE, ctx)
    def store_declared_types(self, lvalue: Lvalue, typ: Type) -> None:
        """Store the declared type `typ` into the node(s) behind `lvalue`.

        Tuple lvalues distribute the items of a TupleType across their targets.
        """
        if isinstance(typ, StarType) and not isinstance(lvalue, StarExpr):
            self.fail('Star type only allowed for starred expressions', lvalue)
        if isinstance(lvalue, RefExpr):
            lvalue.is_inferred_def = False
            if isinstance(lvalue.node, Var):
                var = lvalue.node
                var.type = typ
                var.is_ready = True
            # If node is not a variable, we'll catch it elsewhere.
        elif isinstance(lvalue, TupleExpr):
            typ = get_proper_type(typ)
            if isinstance(typ, TupleType):
                if len(lvalue.items) != len(typ.items):
                    self.fail('Incompatible number of tuple items', lvalue)
                    return
                # Distribute item types pairwise over the tuple targets.
                for item, itemtype in zip(lvalue.items, typ.items):
                    self.store_declared_types(item, itemtype)
            else:
                self.fail('Tuple type expected for multiple variables',
                          lvalue)
        elif isinstance(lvalue, StarExpr):
            # Historical behavior for the old parser
            if isinstance(typ, StarType):
                self.store_declared_types(lvalue.expr, typ.type)
            else:
                self.store_declared_types(lvalue.expr, typ)
        else:
            # This has been flagged elsewhere as an error, so just ignore here.
            pass
def process_typevar_declaration(self, s: AssignmentStmt) -> bool:
    """Check if s declares a TypeVar; it yes, store it in symbol table.

    Return True if this looks like a type variable declaration (but maybe
    with errors), otherwise return False.
    """
    call = self.get_typevarlike_declaration(s, ("typing.TypeVar",))
    if not call:
        return False

    lvalue = s.lvalues[0]
    assert isinstance(lvalue, NameExpr)
    if s.type:
        # 'T: sometype = TypeVar(...)' is not valid.
        self.fail("Cannot declare the type of a type variable", s)
        return False

    name = lvalue.name
    if not self.check_typevarlike_name(call, name, s):
        return False

    # Constraining types: the leading positional args after the name.
    n_values = call.arg_kinds[1:].count(ARG_POS)
    values = self.analyze_value_types(call.args[1:1 + n_values])

    # Remaining (keyword) arguments: variance / bound, etc.
    res = self.process_typevar_parameters(call.args[1 + n_values:],
                                          call.arg_names[1 + n_values:],
                                          call.arg_kinds[1 + n_values:],
                                          n_values,
                                          s)
    if res is None:
        return False
    variance, upper_bound = res

    existing = self.current_symbol_table().get(name)
    if existing and not (isinstance(existing.node, PlaceholderNode) or
                         # Also give error for another type variable with the same name.
                         (isinstance(existing.node, TypeVarExpr) and
                          existing.node is call.analyzed)):
        self.fail('Cannot redefine "%s" as a type variable' % name, s)
        return False

    if self.options.disallow_any_unimported:
        for idx, constraint in enumerate(values, start=1):
            if has_any_from_unimported_type(constraint):
                prefix = "Constraint {}".format(idx)
                self.msg.unimported_type_becomes_any(prefix, constraint, s)

        if has_any_from_unimported_type(upper_bound):
            prefix = "Upper bound of type variable"
            self.msg.unimported_type_becomes_any(prefix, upper_bound, s)

    for t in values + [upper_bound]:
        check_for_explicit_any(t, self.options, self.is_typeshed_stub_file, self.msg,
                               context=s)

    # mypyc suppresses making copies of a function to check each
    # possible type, so set the upper bound to Any to prevent that
    # from causing errors.
    if values and self.options.mypyc:
        upper_bound = AnyType(TypeOfAny.implementation_artifact)

    # Yes, it's a valid type variable definition! Add it to the symbol table.
    if not call.analyzed:
        type_var = TypeVarExpr(name, self.qualified_name(name),
                               values, upper_bound, variance)
        type_var.line = call.line
        call.analyzed = type_var
    else:
        assert isinstance(call.analyzed, TypeVarExpr)
        # On repeated semantic-analysis iterations, update in place and
        # mark progress so the fixed-point loop knows something changed.
        if call.analyzed.values != values or call.analyzed.upper_bound != upper_bound:
            self.progress = True
        call.analyzed.upper_bound = upper_bound
        call.analyzed.values = values

    self.add_symbol(name, call.analyzed, s)
    return True
def check_typevarlike_name(self, call: CallExpr, name: str, context: Context) -> bool:
    """Checks that the name of a TypeVar or ParamSpec matches its variable."""
    name = unmangle(name)
    assert isinstance(call.callee, RefExpr)
    # Use the short name when available for friendlier error messages.
    if isinstance(call.callee, NameExpr):
        typevarlike_type = call.callee.name
    else:
        typevarlike_type = call.callee.fullname
    if len(call.args) < 1:
        self.fail("Too few arguments for {}()".format(typevarlike_type), context)
        return False
    first_arg = call.args[0]
    if (not isinstance(first_arg, (StrExpr, BytesExpr, UnicodeExpr))
            or call.arg_kinds[0] != ARG_POS):
        self.fail("{}() expects a string literal as first argument".format(typevarlike_type),
                  context)
        return False
    if first_arg.value != name:
        msg = 'String argument 1 "{}" to {}(...) does not match variable name "{}"'
        self.fail(msg.format(first_arg.value, typevarlike_type, name), context)
        return False
    return True
def get_typevarlike_declaration(self, s: AssignmentStmt,
                                typevarlike_types: Tuple[str, ...]) -> Optional[CallExpr]:
    """Returns the call expression if `s` is a declaration of `typevarlike_type`
    (TypeVar or ParamSpec), or None otherwise.
    """
    # Must be a single plain-name target assigned from a call whose
    # callee resolves to one of the requested fullnames.
    if len(s.lvalues) == 1 and isinstance(s.lvalues[0], NameExpr):
        rvalue = s.rvalue
        if isinstance(rvalue, CallExpr):
            callee = rvalue.callee
            if isinstance(callee, RefExpr) and callee.fullname in typevarlike_types:
                return rvalue
    return None
def process_typevar_parameters(self, args: List[Expression],
                               names: List[Optional[str]],
                               kinds: List[ArgKind],
                               num_values: int,
                               context: Context) -> Optional[Tuple[int, Type]]:
    """Analyze the keyword arguments of a TypeVar(...) call.

    Returns a (variance, upper bound) pair on success, or None if any
    argument was invalid (an error has then already been reported).
    """
    has_values = (num_values > 0)
    covariant = False
    contravariant = False
    upper_bound: Type = self.object_type()
    for param_value, param_name, param_kind in zip(args, names, kinds):
        if not param_kind == ARG_NAMED:
            self.fail("Unexpected argument to TypeVar()", context)
            return None
        if param_name == 'covariant':
            # Only the literal 'True' is accepted.
            if isinstance(param_value, NameExpr):
                if param_value.name == 'True':
                    covariant = True
                else:
                    self.fail("TypeVar 'covariant' may only be 'True'", context)
                    return None
            else:
                self.fail("TypeVar 'covariant' may only be 'True'", context)
                return None
        elif param_name == 'contravariant':
            # Only the literal 'True' is accepted.
            if isinstance(param_value, NameExpr):
                if param_value.name == 'True':
                    contravariant = True
                else:
                    self.fail("TypeVar 'contravariant' may only be 'True'", context)
                    return None
            else:
                self.fail("TypeVar 'contravariant' may only be 'True'", context)
                return None
        elif param_name == 'bound':
            if has_values:
                self.fail("TypeVar cannot have both values and an upper bound", context)
                return None
            try:
                # We want to use our custom error message below, so we suppress
                # the default error message for invalid types here.
                analyzed = self.expr_to_analyzed_type(param_value,
                                                     allow_placeholder=True,
                                                     report_invalid_types=False)
                if analyzed is None:
                    # Type variables are special: we need to place them in the symbol table
                    # soon, even if upper bound is not ready yet. Otherwise avoiding
                    # a "deadlock" in this common pattern would be tricky:
                    #     T = TypeVar('T', bound=Custom[Any])
                    #     class Custom(Generic[T]):
                    #         ...
                    analyzed = PlaceholderType(None, [], context.line)
                upper_bound = get_proper_type(analyzed)
                if isinstance(upper_bound, AnyType) and upper_bound.is_from_error:
                    self.fail('TypeVar "bound" must be a type', param_value)
                    # Note: we do not return 'None' here -- we want to continue
                    # using the AnyType as the upper bound.
            except TypeTranslationError:
                self.fail('TypeVar "bound" must be a type', param_value)
                return None
        elif param_name == 'values':
            # Probably using obsolete syntax with values=(...). Explain the current syntax.
            self.fail('TypeVar "values" argument not supported', context)
            self.fail("Use TypeVar('T', t, ...) instead of TypeVar('T', values=(t, ...))",
                      context)
            return None
        else:
            self.fail('Unexpected argument to TypeVar(): "{}"'.format(param_name), context)
            return None

    if covariant and contravariant:
        self.fail("TypeVar cannot be both covariant and contravariant", context)
        return None
    elif num_values == 1:
        self.fail("TypeVar cannot have only a single constraint", context)
        return None
    elif covariant:
        variance = COVARIANT
    elif contravariant:
        variance = CONTRAVARIANT
    else:
        variance = INVARIANT
    return variance, upper_bound
def process_paramspec_declaration(self, s: AssignmentStmt) -> bool:
    """Checks if s declares a ParamSpec; if yes, store it in symbol table.

    Return True if this looks like a ParamSpec (maybe with errors), otherwise return False.

    In the future, ParamSpec may accept bounds and variance arguments, in which
    case more aggressive sharing of code with process_typevar_declaration should be pursued.
    """
    # ParamSpec support is gated behind an experimental flag.
    if not self.options.wip_pep_612:
        return False
    call = self.get_typevarlike_declaration(
        s, ("typing_extensions.ParamSpec", "typing.ParamSpec")
    )
    if not call:
        return False

    lvalue = s.lvalues[0]
    assert isinstance(lvalue, NameExpr)
    if s.type:
        self.fail("Cannot declare the type of a parameter specification", s)
        return False

    name = lvalue.name
    if not self.check_typevarlike_name(call, name, s):
        return False

    # PEP 612 reserves the right to define bound, covariant and contravariant arguments to
    # ParamSpec in a later PEP. If and when that happens, we should do something
    # on the lines of process_typevar_parameters
    if not call.analyzed:
        paramspec_var = ParamSpecExpr(
            name, self.qualified_name(name), self.object_type(), INVARIANT
        )
        paramspec_var.line = call.line
        call.analyzed = paramspec_var
    else:
        assert isinstance(call.analyzed, ParamSpecExpr)
    self.add_symbol(name, call.analyzed, s)
    return True
def basic_new_typeinfo(self, name: str,
                       basetype_or_fallback: Instance,
                       line: int) -> TypeInfo:
    """Create a minimal synthetic TypeInfo with a single base class.

    Used for generated classes (e.g. named tuples). The caller-supplied
    name is made unique with '@line' when created inside a function.
    """
    if self.is_func_scope() and not self.type and '@' not in name:
        name += '@' + str(line)
    class_def = ClassDef(name, Block([]))
    if self.is_func_scope() and not self.type:
        # Full names of generated classes should always be prefixed with the module names
        # even if they are nested in a function, since these classes will be (de-)serialized.
        # (Note that the caller should append @line to the name to avoid collisions.)
        # TODO: clean this up, see #6422.
        class_def.fullname = self.cur_mod_id + '.' + self.qualified_name(name)
    else:
        class_def.fullname = self.qualified_name(name)

    info = TypeInfo(SymbolTable(), class_def, self.cur_mod_id)
    class_def.info = info
    mro = basetype_or_fallback.type.mro
    if not mro:
        # Forward reference, MRO should be recalculated in third pass.
        mro = [basetype_or_fallback.type, self.object_type().type]
    info.mro = [info] + mro
    info.bases = [basetype_or_fallback]
    return info
def analyze_value_types(self, items: List[Expression]) -> List[Type]:
    """Analyze types from values expressions in type variable definition."""
    result: List[Type] = []
    for node in items:
        try:
            analyzed = self.anal_type(self.expr_to_unanalyzed_type(node),
                                      allow_placeholder=True)
            if analyzed is None:
                # Type variables are special: we need to place them in the symbol table
                # soon, even if some value is not ready yet, see process_typevar_parameters()
                # for an example.
                analyzed = PlaceholderType(None, [], node.line)
            result.append(analyzed)
        except TypeTranslationError:
            # The expression was not a valid type; record Any so analysis
            # can continue past the error.
            self.fail('Type expected', node)
            result.append(AnyType(TypeOfAny.from_error))
    return result
def check_classvar(self, s: AssignmentStmt) -> None:
    """Check if assignment defines a class variable."""
    lvalue = s.lvalues[0]
    if len(s.lvalues) != 1 or not isinstance(lvalue, RefExpr):
        return
    if not s.type or not self.is_classvar(s.type):
        return
    if self.is_class_scope() and isinstance(lvalue, NameExpr):
        # Valid use: ClassVar annotation directly in a class body.
        node = lvalue.node
        if isinstance(node, Var):
            node.is_classvar = True
    elif not isinstance(lvalue, MemberExpr) or self.is_self_member_ref(lvalue):
        # In case of member access, report error only when assigning to self
        # Other kinds of member assignments should be already reported
        self.fail_invalid_classvar(lvalue)
def is_classvar(self, typ: Type) -> bool:
    """Return True if typ is an unanalyzed reference to typing.ClassVar."""
    if not isinstance(typ, UnboundType):
        return False
    sym = self.lookup_qualified(typ.name, typ)
    return bool(sym and sym.node and sym.node.fullname == 'typing.ClassVar')
def is_final_type(self, typ: Optional[Type]) -> bool:
    """Return True if typ is an unanalyzed reference to Final."""
    if not isinstance(typ, UnboundType):
        return False
    sym = self.lookup_qualified(typ.name, typ)
    return bool(
        sym and sym.node
        and sym.node.fullname in ('typing.Final', 'typing_extensions.Final')
    )
def fail_invalid_classvar(self, context: Context) -> None:
    """Report a ClassVar annotation used outside a class body."""
    self.fail('ClassVar can only be used for assignments in class body', context)
def process_module_assignment(self, lvals: List[Lvalue], rval: Expression,
                              ctx: AssignmentStmt) -> None:
    """Propagate module references across assignments.

    Recursively handles the simple form of iterable unpacking; doesn't
    handle advanced unpacking with *rest, dictionary unpacking, etc.

    In an expression like x = y = z, z is the rval and lvals will be [x,
    y].
    """
    if (isinstance(rval, (TupleExpr, ListExpr))
            and all(isinstance(v, TupleExpr) for v in lvals)):
        # rval and all lvals are either list or tuple, so we are dealing
        # with unpacking assignment like `x, y = a, b`. Mypy didn't
        # understand our all(isinstance(...)), so cast them as TupleExpr
        # so mypy knows it is safe to access their .items attribute.
        seq_lvals = cast(List[TupleExpr], lvals)
        # given an assignment like:
        #     (x, y) = (m, n) = (a, b)
        # we now have:
        #     seq_lvals = [(x, y), (m, n)]
        #     seq_rval = (a, b)
        # We now zip this into:
        #     elementwise_assignments = [(a, x, m), (b, y, n)]
        # where each elementwise assignment includes one element of rval and the
        # corresponding element of each lval. Basically we unpack
        #     (x, y) = (m, n) = (a, b)
        # into elementwise assignments
        #     x = m = a
        #     y = n = b
        # and then we recursively call this method for each of those assignments.
        # If the rval and all lvals are not all of the same length, zip will just ignore
        # extra elements, so no error will be raised here; mypy will later complain
        # about the length mismatch in type-checking.
        elementwise_assignments = zip(rval.items, *[v.items for v in seq_lvals])
        for rv, *lvs in elementwise_assignments:
            self.process_module_assignment(lvs, rv, ctx)
    elif isinstance(rval, RefExpr):
        rnode = self.lookup_type_node(rval)
        if rnode and isinstance(rnode.node, MypyFile):
            # The rvalue refers to a module; alias or reject each target.
            for lval in lvals:
                if not isinstance(lval, RefExpr):
                    continue
                # respect explicitly annotated type
                if (isinstance(lval.node, Var) and lval.node.type is not None):
                    continue
                # We can handle these assignments to locals and to self
                if isinstance(lval, NameExpr):
                    lnode = self.current_symbol_table().get(lval.name)
                elif isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
                    assert self.type is not None
                    lnode = self.type.names.get(lval.name)
                else:
                    continue
                if lnode:
                    if isinstance(lnode.node, MypyFile) and lnode.node is not rnode.node:
                        assert isinstance(lval, (NameExpr, MemberExpr))
                        self.fail(
                            'Cannot assign multiple modules to name "{}" '
                            'without explicit "types.ModuleType" annotation'.format(lval.name),
                            ctx)
                    # never create module alias except on initial var definition
                    elif lval.is_inferred_def:
                        assert rnode.node is not None
                        lnode.node = rnode.node
def process__all__(self, s: AssignmentStmt) -> None:
    """Export names if argument is a __all__ assignment."""
    if len(s.lvalues) != 1:
        return
    target = s.lvalues[0]
    # Only a module-level '__all__ = [...]' / '(...)' counts.
    if (isinstance(target, NameExpr) and target.name == '__all__'
            and target.kind == GDEF
            and isinstance(s.rvalue, (ListExpr, TupleExpr))):
        self.add_exports(s.rvalue.items)
def process__deletable__(self, s: AssignmentStmt) -> None:
    """Record mypyc's '__deletable__' class attribute list, if present.

    Only meaningful when compiling with mypyc; otherwise a no-op.
    """
    if not self.options.mypyc:
        return
    if (len(s.lvalues) == 1 and isinstance(s.lvalues[0], NameExpr) and
            s.lvalues[0].name == '__deletable__' and s.lvalues[0].kind == MDEF):
        rvalue = s.rvalue
        if not isinstance(rvalue, (ListExpr, TupleExpr)):
            self.fail('"__deletable__" must be initialized with a list or tuple expression', s)
            return
        items = rvalue.items
        attrs = []
        for item in items:
            # Each entry must be a string literal naming an attribute.
            if not isinstance(item, StrExpr):
                self.fail('Invalid "__deletable__" item; string literal expected', item)
            else:
                attrs.append(item.value)
        assert self.type
        self.type.deletable_attributes = attrs
#
# Misc statements
#
def visit_block(self, b: Block) -> None:
    """Analyze all statements in a block, tracking block nesting depth."""
    if b.is_unreachable:
        # Don't analyze statements that were determined to be unreachable.
        return
    self.block_depth[-1] += 1
    for s in b.body:
        self.accept(s)
    self.block_depth[-1] -= 1
def visit_block_maybe(self, b: Optional[Block]) -> None:
    """Analyze a block if it is not None (e.g. an optional else body)."""
    if b:
        self.visit_block(b)
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
    """Analyze a bare expression statement."""
    self.statement = s
    s.expr.accept(self)
def visit_return_stmt(self, s: ReturnStmt) -> None:
    """Analyze a return statement; it is only valid inside a function."""
    self.statement = s
    if not self.is_func_scope():
        self.fail('"return" outside function', s)
    if s.expr:
        s.expr.accept(self)
def visit_raise_stmt(self, s: RaiseStmt) -> None:
    """Analyze a raise statement, including an optional 'from' clause."""
    self.statement = s
    if s.expr:
        s.expr.accept(self)
    if s.from_expr:
        s.from_expr.accept(self)
def visit_assert_stmt(self, s: AssertStmt) -> None:
    """Analyze an assert statement and its optional message expression."""
    self.statement = s
    if s.expr:
        s.expr.accept(self)
    if s.msg:
        s.msg.accept(self)
def visit_operator_assignment_stmt(self,
                                   s: OperatorAssignmentStmt) -> None:
    """Analyze an augmented assignment such as 'x += y'."""
    self.statement = s
    s.lvalue.accept(self)
    s.rvalue.accept(self)
    target = s.lvalue
    # '__all__ += [...]' at module level also extends the export list.
    if (isinstance(target, NameExpr) and target.name == '__all__'
            and target.kind == GDEF
            and isinstance(s.rvalue, (ListExpr, TupleExpr))):
        self.add_exports(s.rvalue.items)
def visit_while_stmt(self, s: WhileStmt) -> None:
    """Analyze a while loop, tracking loop depth for break/continue checks."""
    self.statement = s
    s.expr.accept(self)
    self.loop_depth += 1
    s.body.accept(self)
    self.loop_depth -= 1
    self.visit_block_maybe(s.else_body)
def visit_for_stmt(self, s: ForStmt) -> None:
    """Analyze a for loop: iterable, index lvalue(s), optional index type,
    body and else clause."""
    self.statement = s
    s.expr.accept(self)

    # Bind index variables and check if they define new names.
    self.analyze_lvalue(s.index, explicit_type=s.index_type is not None)
    if s.index_type:
        if self.is_classvar(s.index_type):
            self.fail_invalid_classvar(s.index)
        # Tuple literals are only allowed in types when the index itself
        # is a tuple target.
        allow_tuple_literal = isinstance(s.index, TupleExpr)
        analyzed = self.anal_type(s.index_type, allow_tuple_literal=allow_tuple_literal)
        if analyzed is not None:
            self.store_declared_types(s.index, analyzed)
            s.index_type = analyzed

    self.loop_depth += 1
    self.visit_block(s.body)
    self.loop_depth -= 1

    self.visit_block_maybe(s.else_body)
def visit_break_stmt(self, s: BreakStmt) -> None:
    """Analyze a break statement; it is only valid inside a loop."""
    self.statement = s
    if self.loop_depth == 0:
        self.fail('"break" outside loop', s, serious=True, blocker=True)
def visit_continue_stmt(self, s: ContinueStmt) -> None:
    """Analyze a continue statement; it is only valid inside a loop."""
    self.statement = s
    if self.loop_depth == 0:
        self.fail('"continue" outside loop', s, serious=True, blocker=True)
def visit_if_stmt(self, s: IfStmt) -> None:
    """Analyze an if statement, first marking unreachable branches."""
    self.statement = s
    infer_reachability_of_if_statement(s, self.options)
    # s.expr and s.body are parallel lists: one entry per if/elif branch.
    for i in range(len(s.expr)):
        s.expr[i].accept(self)
        self.visit_block(s.body[i])
    self.visit_block_maybe(s.else_body)
def visit_try_stmt(self, s: TryStmt) -> None:
    """Analyze a try statement (delegates to analyze_try_stmt)."""
    self.statement = s
    self.analyze_try_stmt(s, self)
def analyze_try_stmt(self, s: TryStmt, visitor: NodeVisitor[None]) -> None:
    """Analyze a try statement's body, handlers, else and finally parts."""
    s.body.accept(visitor)
    # s.types, s.vars and s.handlers are parallel lists, one entry per
    # 'except' clause; type and var may be None for a bare 'except:'.
    for type, var, handler in zip(s.types, s.vars, s.handlers):
        if type:
            type.accept(visitor)
        if var:
            self.analyze_lvalue(var)
        handler.accept(visitor)
    if s.else_body:
        s.else_body.accept(visitor)
    if s.finally_body:
        s.finally_body.accept(visitor)
def visit_with_stmt(self, s: WithStmt) -> None:
    """Analyze a with statement, matching an optional type comment against
    the statement's targets."""
    self.statement = s
    types: List[Type] = []

    if s.unanalyzed_type:
        assert isinstance(s.unanalyzed_type, ProperType)
        actual_targets = [t for t in s.target if t is not None]
        if len(actual_targets) == 0:
            # We have a type for no targets
            self.fail('Invalid type comment: "with" statement has no targets', s)
        elif len(actual_targets) == 1:
            # We have one target and one type
            types = [s.unanalyzed_type]
        elif isinstance(s.unanalyzed_type, TupleType):
            # We have multiple targets and multiple types
            if len(actual_targets) == len(s.unanalyzed_type.items):
                types = s.unanalyzed_type.items.copy()
            else:
                # But it's the wrong number of items
                self.fail('Incompatible number of types for "with" targets', s)
        else:
            # We have multiple targets and one type
            self.fail('Multiple types expected for multiple "with" targets', s)

    new_types: List[Type] = []
    for e, n in zip(s.expr, s.target):
        e.accept(self)
        if n:
            self.analyze_lvalue(n, explicit_type=s.unanalyzed_type is not None)

            # Since we have a target, pop the next type from types
            if types:
                t = types.pop(0)
                if self.is_classvar(t):
                    self.fail_invalid_classvar(n)
                allow_tuple_literal = isinstance(n, TupleExpr)
                analyzed = self.anal_type(t, allow_tuple_literal=allow_tuple_literal)
                if analyzed is not None:
                    # TODO: Deal with this better
                    new_types.append(analyzed)
                    self.store_declared_types(n, analyzed)

    s.analyzed_types = new_types

    self.visit_block(s.body)
def visit_del_stmt(self, s: DelStmt) -> None:
    """Analyze a del statement and validate its target expression."""
    self.statement = s
    s.expr.accept(self)
    if not self.is_valid_del_target(s.expr):
        self.fail('Invalid delete target', s)
def is_valid_del_target(self, s: Expression) -> bool:
    """Return True if s may appear as the target of a 'del' statement."""
    if isinstance(s, (IndexExpr, NameExpr, MemberExpr)):
        return True
    if isinstance(s, (TupleExpr, ListExpr)):
        # Sequence targets are valid when every element is.
        for item in s.items:
            if not self.is_valid_del_target(item):
                return False
        return True
    return False
def visit_global_decl(self, g: GlobalDecl) -> None:
    """Analyze a 'global' declaration, recording the declared names."""
    self.statement = g
    for name in g.names:
        # A name cannot be declared both nonlocal and global in one scope.
        if name in self.nonlocal_decls[-1]:
            self.fail('Name "{}" is nonlocal and global'.format(name), g)
        self.global_decls[-1].add(name)
def visit_nonlocal_decl(self, d: NonlocalDecl) -> None:
    """Analyze a 'nonlocal' declaration, checking each declared name."""
    self.statement = d
    if not self.is_func_scope():
        self.fail("nonlocal declaration not allowed at module level", d)
    else:
        for name in d.names:
            # Search enclosing (non-current) function scopes for a binding;
            # the for/else fires only if no scope defines the name.
            for table in reversed(self.locals[:-1]):
                if table is not None and name in table:
                    break
            else:
                self.fail('No binding for nonlocal "{}" found'.format(name), d)

            if self.locals[-1] is not None and name in self.locals[-1]:
                self.fail('Name "{}" is already defined in local '
                          'scope before nonlocal declaration'.format(name), d)

            if name in self.global_decls[-1]:
                self.fail('Name "{}" is nonlocal and global'.format(name), d)
            self.nonlocal_decls[-1].add(name)
def visit_print_stmt(self, s: PrintStmt) -> None:
    """Analyze a Python 2 print statement (including 'print >>target')."""
    self.statement = s
    for arg in s.args:
        arg.accept(self)
    if s.target:
        s.target.accept(self)
def visit_exec_stmt(self, s: ExecStmt) -> None:
    """Analyze a Python 2 exec statement and its optional namespaces."""
    self.statement = s
    s.expr.accept(self)
    if s.globals:
        s.globals.accept(self)
    if s.locals:
        s.locals.accept(self)
#
# Expressions
#
def visit_name_expr(self, expr: NameExpr) -> None:
    """Analyze a name expression by looking it up and binding it."""
    n = self.lookup(expr.name, expr)
    if n:
        self.bind_name_expr(expr, n)
def bind_name_expr(self, expr: NameExpr, sym: SymbolTableNode) -> None:
    """Bind name expression to a symbol table node."""
    if isinstance(sym.node, TypeVarExpr) and self.tvar_scope.get_binding(sym):
        # A bound type variable used as a plain value is an error.
        self.fail('"{}" is a type variable and only valid in type '
                  'context'.format(expr.name), expr)
    elif isinstance(sym.node, PlaceholderNode):
        # Target not fully defined yet; defer to a later iteration.
        self.process_placeholder(expr.name, 'name', expr)
    else:
        expr.kind = sym.kind
        expr.node = sym.node
        expr.fullname = sym.fullname
def visit_super_expr(self, expr: SuperExpr) -> None:
    """Analyze a super() expression; it must occur inside a class
    (unless explicit arguments are given)."""
    if not self.type and not expr.call.args:
        self.fail('"super" used outside class', expr)
        return
    expr.info = self.type
    for arg in expr.call.args:
        arg.accept(self)
def visit_tuple_expr(self, expr: TupleExpr) -> None:
    """Analyze a tuple expression; star items are valid in this position."""
    for element in expr.items:
        if isinstance(element, StarExpr):
            element.valid = True
        element.accept(self)
def visit_list_expr(self, expr: ListExpr) -> None:
    """Analyze a list expression; star items are valid in this position."""
    for element in expr.items:
        if isinstance(element, StarExpr):
            element.valid = True
        element.accept(self)
def visit_set_expr(self, expr: SetExpr) -> None:
    """Analyze a set expression; star items are valid in this position."""
    for element in expr.items:
        if isinstance(element, StarExpr):
            element.valid = True
        element.accept(self)
def visit_dict_expr(self, expr: DictExpr) -> None:
    """Analyze a dict expression; key is None for '**' unpacking items."""
    for key, value in expr.items:
        if key is not None:
            key.accept(self)
        value.accept(self)
def visit_star_expr(self, expr: StarExpr) -> None:
    """Analyze a starred expression; only valid where a container visitor
    has marked it as such."""
    if not expr.valid:
        # XXX TODO Change this error message
        self.fail('Can use starred expression only as assignment target', expr)
    else:
        expr.expr.accept(self)
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
    """Analyze 'yield from'; valid only in a non-async function, which it
    marks as a generator."""
    if not self.is_func_scope():  # not sure
        self.fail('"yield from" outside function', e, serious=True, blocker=True)
    else:
        if self.function_stack[-1].is_coroutine:
            self.fail('"yield from" in async function', e, serious=True, blocker=True)
        else:
            self.function_stack[-1].is_generator = True
    if e.expr:
        e.expr.accept(self)
def visit_call_expr(self, expr: CallExpr) -> None:
    """Analyze a call expression.

    Some call expressions are recognized as special forms, including
    cast(...). Recognized special forms are stored in expr.analyzed,
    which takes precedence over the plain call semantics downstream.
    """
    expr.callee.accept(self)
    if refers_to_fullname(expr.callee, 'typing.cast'):
        # Special form cast(...).
        if not self.check_fixed_args(expr, 2, 'cast'):
            return
        # Translate first argument to an unanalyzed type.
        try:
            target = self.expr_to_unanalyzed_type(expr.args[0])
        except TypeTranslationError:
            self.fail('Cast target is not a type', expr)
            return
        # Piggyback CastExpr object to the CallExpr object; it takes
        # precedence over the CallExpr semantics.
        expr.analyzed = CastExpr(expr.args[1], target)
        expr.analyzed.line = expr.line
        expr.analyzed.column = expr.column
        expr.analyzed.accept(self)
    elif refers_to_fullname(expr.callee, 'builtins.reveal_type'):
        if not self.check_fixed_args(expr, 1, 'reveal_type'):
            return
        expr.analyzed = RevealExpr(kind=REVEAL_TYPE, expr=expr.args[0])
        expr.analyzed.line = expr.line
        expr.analyzed.column = expr.column
        expr.analyzed.accept(self)
    elif refers_to_fullname(expr.callee, 'builtins.reveal_locals'):
        # Store the local variable names into the RevealExpr for use in the
        # type checking pass
        local_nodes: List[Var] = []
        if self.is_module_scope():
            # try to determine just the variable declarations in module scope
            # self.globals.values() contains SymbolTableNode's
            # Each SymbolTableNode has an attribute node that is nodes.Var
            # look for variable nodes that marked as is_inferred
            # Each symboltable node has a Var node as .node
            local_nodes = [n.node
                           for name, n in self.globals.items()
                           if getattr(n.node, 'is_inferred', False)
                           and isinstance(n.node, Var)]
        elif self.is_class_scope():
            # type = None  # type: Optional[TypeInfo]
            if self.type is not None:
                local_nodes = [st.node
                               for st in self.type.names.values()
                               if isinstance(st.node, Var)]
        elif self.is_func_scope():
            # locals = None  # type: List[Optional[SymbolTable]]
            if self.locals is not None:
                symbol_table = self.locals[-1]
                if symbol_table is not None:
                    local_nodes = [st.node
                                   for st in symbol_table.values()
                                   if isinstance(st.node, Var)]
        expr.analyzed = RevealExpr(kind=REVEAL_LOCALS, local_nodes=local_nodes)
        expr.analyzed.line = expr.line
        expr.analyzed.column = expr.column
        expr.analyzed.accept(self)
    elif refers_to_fullname(expr.callee, 'typing.Any'):
        # Special form Any(...) no longer supported.
        self.fail('Any(...) is no longer supported. Use cast(Any, ...) instead', expr)
    elif refers_to_fullname(expr.callee, 'typing._promote'):
        # Special form _promote(...).
        if not self.check_fixed_args(expr, 1, '_promote'):
            return
        # Translate first argument to an unanalyzed type.
        try:
            target = self.expr_to_unanalyzed_type(expr.args[0])
        except TypeTranslationError:
            self.fail('Argument 1 to _promote is not a type', expr)
            return
        expr.analyzed = PromoteExpr(target)
        expr.analyzed.line = expr.line
        expr.analyzed.accept(self)
    elif refers_to_fullname(expr.callee, 'builtins.dict'):
        expr.analyzed = self.translate_dict_call(expr)
    elif refers_to_fullname(expr.callee, 'builtins.divmod'):
        if not self.check_fixed_args(expr, 2, 'divmod'):
            return
        expr.analyzed = OpExpr('divmod', expr.args[0], expr.args[1])
        expr.analyzed.line = expr.line
        expr.analyzed.accept(self)
    else:
        # Normal call expression.
        for a in expr.args:
            a.accept(self)

        # '__all__.append(...)' / '__all__.extend(...)' at module level
        # also contribute to the export list.
        if (isinstance(expr.callee, MemberExpr) and
                isinstance(expr.callee.expr, NameExpr) and
                expr.callee.expr.name == '__all__' and
                expr.callee.expr.kind == GDEF and
                expr.callee.name in ('append', 'extend')):
            if expr.callee.name == 'append' and expr.args:
                self.add_exports(expr.args[0])
            elif (expr.callee.name == 'extend' and expr.args and
                    isinstance(expr.args[0], (ListExpr, TupleExpr))):
                self.add_exports(expr.args[0].items)
def translate_dict_call(self, call: CallExpr) -> Optional[DictExpr]:
    """Translate 'dict(x=y, ...)' to {'x': y, ...} and 'dict()' to {}.

    For other variants of dict(...), return None.
    """
    if not all(kind == ARG_NAMED for kind in call.arg_kinds):
        # Must still accept those args.
        for a in call.args:
            a.accept(self)
        return None
    expr = DictExpr([(StrExpr(cast(str, key)), value)  # since they are all ARG_NAMED
                     for key, value in zip(call.arg_names, call.args)])
    expr.set_line(call)
    expr.accept(self)
    return expr
def check_fixed_args(self, expr: CallExpr, numargs: int,
                     name: str) -> bool:
    """Verify that expr has specified number of positional args.

    Return True if the arguments are valid.
    """
    # Pluralize "argument" in the error messages when numargs != 1.
    plural = '' if numargs == 1 else 's'
    if len(expr.args) != numargs:
        self.fail('"%s" expects %d argument%s' % (name, numargs, plural),
                  expr)
        return False
    if expr.arg_kinds != [ARG_POS] * numargs:
        self.fail('"%s" must be called with %s positional argument%s' %
                  (name, numargs, plural), expr)
        return False
    return True
def visit_member_expr(self, expr: MemberExpr) -> None:
    """Analyze a member access expression, binding module attributes and
    class-namespace types/modules where possible."""
    base = expr.expr
    base.accept(self)
    if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
        # Handle module attribute.
        sym = self.get_module_symbol(base.node, expr.name)
        if sym:
            if isinstance(sym.node, PlaceholderNode):
                self.process_placeholder(expr.name, 'attribute', expr)
                return
            expr.kind = sym.kind
            expr.fullname = sym.fullname
            expr.node = sym.node
    elif isinstance(base, RefExpr):
        # This branch handles the case C.bar (or cls.bar or self.bar inside
        # a classmethod/method), where C is a class and bar is a type
        # definition or a module resulting from `import bar` (or a module
        # assignment) inside class C. We look up bar in the class' TypeInfo
        # namespace. This is done only when bar is a module or a type;
        # other things (e.g. methods) are handled by other code in
        # checkmember.
        type_info = None
        if isinstance(base.node, TypeInfo):
            # C.bar where C is a class
            type_info = base.node
        elif isinstance(base.node, Var) and self.type and self.function_stack:
            # check for self.bar or cls.bar in method/classmethod
            func_def = self.function_stack[-1]
            if not func_def.is_static and isinstance(func_def.type, CallableType):
                formal_arg = func_def.type.argument_by_name(base.node.name)
                if formal_arg and formal_arg.pos == 0:
                    type_info = self.type
        elif isinstance(base.node, TypeAlias) and base.node.no_args:
            assert isinstance(base.node.target, ProperType)
            if isinstance(base.node.target, Instance):
                type_info = base.node.target.type
        if type_info:
            n = type_info.names.get(expr.name)
            # Note: the former 'if not n: return' guard here was dead code,
            # since this branch already requires n is not None.
            if n is not None and isinstance(n.node, (MypyFile, TypeInfo, TypeAlias)):
                expr.kind = n.kind
                expr.fullname = n.fullname
                expr.node = n.node
def visit_op_expr(self, expr: OpExpr) -> None:
    """Analyze a binary operator expression, marking the right operand of
    'and'/'or' as unreachable or always-evaluated when the left operand's
    truth value can be inferred statically."""
    expr.left.accept(self)

    if expr.op in ('and', 'or'):
        inferred = infer_condition_value(expr.left, self.options)
        if ((inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'and') or
                (inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'or')):
            # Short circuit: the right operand can never be evaluated.
            expr.right_unreachable = True
            return
        elif ((inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'and') or
                (inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'or')):
            # The right operand is always evaluated.
            expr.right_always = True

    expr.right.accept(self)
def visit_comparison_expr(self, expr: ComparisonExpr) -> None:
    """Analyze all operands of a (possibly chained) comparison."""
    for operand in expr.operands:
        operand.accept(self)
def visit_unary_expr(self, expr: UnaryExpr) -> None:
    """Analyze the operand of a unary operator expression."""
    expr.expr.accept(self)
def visit_index_expr(self, expr: IndexExpr) -> None:
    """Analyze an indexing expression, recognizing type applications like
    C[int] when the base refers to a generic class or a type alias."""
    base = expr.base
    base.accept(self)
    if (isinstance(base, RefExpr)
            and isinstance(base.node, TypeInfo)
            and not base.node.is_generic()):
        # Subscripting a non-generic class: plain index expression.
        expr.index.accept(self)
    elif ((isinstance(base, RefExpr) and isinstance(base.node, TypeAlias))
            or refers_to_class_or_function(base)):
        # We need to do full processing on every iteration, since some type
        # arguments may contain placeholder types.
        self.analyze_type_application(expr)
    else:
        expr.index.accept(self)
def analyze_type_application(self, expr: IndexExpr) -> None:
    """Analyze special form -- type application (either direct or via type aliasing)."""
    types = self.analyze_type_application_args(expr)
    if types is None:
        # Incomplete; will be retried on a later iteration.
        return
    base = expr.base
    expr.analyzed = TypeApplication(base, types)
    expr.analyzed.line = expr.line
    expr.analyzed.column = expr.column
    # Types list, dict, set are not subscriptable, prohibit this if
    # subscripted either via type alias...
    if isinstance(base, RefExpr) and isinstance(base.node, TypeAlias):
        alias = base.node
        target = get_proper_type(alias.target)
        if isinstance(target, Instance):
            name = target.type.fullname
            if (alias.no_args and  # this avoids bogus errors for already reported aliases
                    name in get_nongen_builtins(self.options.python_version) and
                    not alias.normalized):
                self.fail(no_subscript_builtin_alias(name, propose_alt=False), expr)
    # ...or directly.
    else:
        n = self.lookup_type_node(base)
        if (n and n.fullname in get_nongen_builtins(self.options.python_version) and
                not self.is_stub_file):
            self.fail(no_subscript_builtin_alias(n.fullname, propose_alt=False), expr)
def analyze_type_application_args(self, expr: IndexExpr) -> Optional[List[Type]]:
    """Analyze type arguments (index) in a type application.

    Return None if anything was incomplete.
    """
    index = expr.index
    tag = self.track_incomplete_refs()
    self.analyze_type_expr(index)
    if self.found_incomplete_ref(tag):
        return None
    types: List[Type] = []
    if isinstance(index, TupleExpr):
        items = index.items
        # Special case Tuple[X, ...]: the trailing ellipsis is not a type.
        is_tuple = isinstance(expr.base, RefExpr) and expr.base.fullname == 'builtins.tuple'
        if is_tuple and len(items) == 2 and isinstance(items[-1], EllipsisExpr):
            items = items[:-1]
    else:
        items = [index]
    for item in items:
        try:
            typearg = self.expr_to_unanalyzed_type(item)
        except TypeTranslationError:
            self.fail('Type expected within [...]', expr)
            return None
        # We always allow unbound type variables in IndexExpr, since we
        # may be analysing a type alias definition rvalue. The error will be
        # reported elsewhere if it is not the case.
        analyzed = self.anal_type(typearg, allow_unbound_tvars=True,
                                  allow_placeholder=True)
        if analyzed is None:
            return None
        types.append(analyzed)
    return types
def visit_slice_expr(self, expr: SliceExpr) -> None:
if expr.begin_index:
expr.begin_index.accept(self)
if expr.end_index:
expr.end_index.accept(self)
if expr.stride:
expr.stride.accept(self)
def visit_cast_expr(self, expr: CastExpr) -> None:
expr.expr.accept(self)
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_reveal_expr(self, expr: RevealExpr) -> None:
if expr.kind == REVEAL_TYPE:
if expr.expr is not None:
expr.expr.accept(self)
else:
# Reveal locals doesn't have an inner expression, there's no
# need to traverse inside it
pass
def visit_type_application(self, expr: TypeApplication) -> None:
expr.expr.accept(self)
for i in range(len(expr.types)):
analyzed = self.anal_type(expr.types[i])
if analyzed is not None:
expr.types[i] = analyzed
    def visit_list_comprehension(self, expr: ListComprehension) -> None:
        # A list comprehension is a thin wrapper around its generator expression.
        expr.generator.accept(self)
    def visit_set_comprehension(self, expr: SetComprehension) -> None:
        # A set comprehension is a thin wrapper around its generator expression.
        expr.generator.accept(self)
    def visit_dictionary_comprehension(self, expr: DictionaryComprehension) -> None:
        """Analyze a dict comprehension in its own (comprehension) scope."""
        self.enter(expr)
        # Index variables and conditions live in the comprehension scope...
        self.analyze_comp_for(expr)
        expr.key.accept(self)
        expr.value.accept(self)
        self.leave()
        # ...but the first iterable is evaluated in the surrounding scope.
        self.analyze_comp_for_2(expr)
    def visit_generator_expr(self, expr: GeneratorExpr) -> None:
        """Analyze a generator expression in its own (comprehension) scope."""
        self.enter(expr)
        # Index variables, conditions and the result expression are local...
        self.analyze_comp_for(expr)
        expr.left_expr.accept(self)
        self.leave()
        # ...but the first iterable is evaluated in the surrounding scope.
        self.analyze_comp_for_2(expr)
    def analyze_comp_for(self, expr: Union[GeneratorExpr,
                                           DictionaryComprehension]) -> None:
        """Analyses the 'comp_for' part of comprehensions (part 1).

        That is the part after 'for' in (x for x in l if p). This analyzes
        variables and conditions which are analyzed in a local scope.
        """
        for i, (index, sequence, conditions) in enumerate(zip(expr.indices,
                                                              expr.sequences,
                                                              expr.condlists)):
            if i > 0:
                # Only the first sequence is evaluated in the outer scope
                # (see analyze_comp_for_2); the rest belong here.
                sequence.accept(self)
            # Bind index variables.
            self.analyze_lvalue(index)
            for cond in conditions:
                cond.accept(self)
    def analyze_comp_for_2(self, expr: Union[GeneratorExpr,
                                             DictionaryComprehension]) -> None:
        """Analyses the 'comp_for' part of comprehensions (part 2).

        That is the part after 'for' in (x for x in l if p). This analyzes
        the 'l' part which is analyzed in the surrounding scope.
        """
        expr.sequences[0].accept(self)
    def visit_lambda_expr(self, expr: LambdaExpr) -> None:
        # A lambda is analyzed like a function: defaults first, then the body.
        self.analyze_arg_initializers(expr)
        self.analyze_function_body(expr)
def visit_conditional_expr(self, expr: ConditionalExpr) -> None:
expr.if_expr.accept(self)
expr.cond.accept(self)
expr.else_expr.accept(self)
    def visit_backquote_expr(self, expr: BackquoteExpr) -> None:
        # Python 2 backquote (`x`) repr expression: just analyze the operand.
        expr.expr.accept(self)
def visit__promote_expr(self, expr: PromoteExpr) -> None:
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
    def visit_yield_expr(self, expr: YieldExpr) -> None:
        """Check that 'yield' is used in a legal context and mark the function."""
        if not self.is_func_scope():
            self.fail('"yield" outside function', expr, serious=True, blocker=True)
        else:
            if self.function_stack[-1].is_coroutine:
                if self.options.python_version < (3, 6):
                    # 'yield' in 'async def' (async generators) needs 3.6+.
                    self.fail('"yield" in async function', expr, serious=True, blocker=True)
                else:
                    self.function_stack[-1].is_generator = True
                    self.function_stack[-1].is_async_generator = True
            else:
                # Any 'yield' turns a plain function into a generator.
                self.function_stack[-1].is_generator = True
        if expr.expr:
            expr.expr.accept(self)
    def visit_await_expr(self, expr: AwaitExpr) -> None:
        """Check that 'await' appears inside a coroutine and analyze its operand."""
        if not self.is_func_scope():
            self.fail('"await" outside function', expr)
        elif not self.function_stack[-1].is_coroutine:
            self.fail('"await" outside coroutine ("async def")', expr)
        # Analyze the operand even if the context was invalid.
        expr.expr.accept(self)
#
# Lookup functions
#
    def lookup(self, name: str, ctx: Context,
               suppress_errors: bool = False) -> Optional[SymbolTableNode]:
        """Look up an unqualified (no dots) name in all active namespaces.

        Namespaces are tried in precedence order: explicit 'global'/'nonlocal'
        declarations, class body, enclosing function scopes, module globals,
        and finally builtins.

        Note that the result may contain a PlaceholderNode. The caller may
        want to defer in that case.

        Generate an error if the name is not defined unless suppress_errors
        is true or the current namespace is incomplete. In the latter case
        defer.
        """
        implicit_name = False
        # 1a. Name declared using 'global x' takes precedence
        if name in self.global_decls[-1]:
            if name in self.globals:
                return self.globals[name]
            if not suppress_errors:
                self.name_not_defined(name, ctx)
            return None
        # 1b. Name declared using 'nonlocal x' takes precedence
        if name in self.nonlocal_decls[-1]:
            # Search enclosing function scopes (excluding the current one).
            for table in reversed(self.locals[:-1]):
                if table is not None and name in table:
                    return table[name]
            else:
                if not suppress_errors:
                    self.name_not_defined(name, ctx)
                return None
        # 2. Class attributes (if within class definition)
        if self.type and not self.is_func_scope() and name in self.type.names:
            node = self.type.names[name]
            if not node.implicit:
                if self.is_active_symbol_in_class_body(node.node):
                    return node
            else:
                # Defined through self.x assignment
                implicit_name = True
                implicit_node = node
        # 3. Local (function) scopes
        for table in reversed(self.locals):
            if table is not None and name in table:
                return table[name]
        # 4. Current file global scope
        if name in self.globals:
            return self.globals[name]
        # 5. Builtins
        b = self.globals.get('__builtins__', None)
        if b:
            assert isinstance(b.node, MypyFile)
            table = b.node.names
            if name in table:
                # Names with a single leading underscore are not accessible
                # through the implicit builtins namespace.
                if name[0] == "_" and name[1] != "_":
                    if not suppress_errors:
                        self.name_not_defined(name, ctx)
                    return None
                node = table[name]
                return node
        # Give up.
        if not implicit_name and not suppress_errors:
            self.name_not_defined(name, ctx)
        else:
            if implicit_name:
                # Fall back to the attribute implicitly defined via self.x.
                return implicit_node
        return None
    def is_active_symbol_in_class_body(self, node: Optional[SymbolNode]) -> bool:
        """Can a symbol defined in class body accessed at current statement?

        Only allow access to class attributes textually after
        the definition, so that it's possible to fall back to the
        outer scope. Example:

            class X: ...
            class C:
                X = X  # Initializer refers to outer scope

        Nested classes are an exception, since we want to support
        arbitrary forward references in type annotations.
        """
        # TODO: Forward reference to name imported in class body is not
        #       caught.
        assert self.statement  # we are at class scope
        return (node is None
                or self.is_textually_before_statement(node)
                or not self.is_defined_in_current_module(node.fullname)
                # Nested classes (and placeholders that become classes) are
                # always accessible, to allow forward references in annotations.
                or isinstance(node, TypeInfo)
                or (isinstance(node, PlaceholderNode) and node.becomes_typeinfo))
    def is_textually_before_statement(self, node: SymbolNode) -> bool:
        """Check if a node is defined textually before the current statement

        Note that decorated functions' line number are the same as
        the top decorator.
        """
        assert self.statement
        line_diff = self.statement.line - node.line
        # The first branch handles reference an overloaded function variant inside itself,
        # this is a corner case where mypy technically deviates from runtime name resolution,
        # but it is fine because we want an overloaded function to be treated as a single unit.
        if self.is_overloaded_item(node, self.statement):
            return False
        elif isinstance(node, Decorator) and not node.is_overload:
            # Skip past the decorator lines, which share the def's line number.
            return line_diff > len(node.original_decorators)
        else:
            return line_diff > 0
def is_overloaded_item(self, node: SymbolNode, statement: Statement) -> bool:
"""Check whether the function belongs to the overloaded variants"""
if isinstance(node, OverloadedFuncDef) and isinstance(statement, FuncDef):
in_items = statement in {item.func if isinstance(item, Decorator)
else item for item in node.items}
in_impl = (node.impl is not None and
((isinstance(node.impl, Decorator) and statement is node.impl.func)
or statement is node.impl))
return in_items or in_impl
return False
    def is_defined_in_current_module(self, fullname: Optional[str]) -> bool:
        """Is the fully qualified name defined by the module being analyzed?"""
        if fullname is None:
            return False
        return module_prefix(self.modules, fullname) == self.cur_mod_id
    def lookup_qualified(self, name: str, ctx: Context,
                         suppress_errors: bool = False) -> Optional[SymbolTableNode]:
        """Lookup a qualified name in all activate namespaces.

        Note that the result may contain a PlaceholderNode. The caller may
        want to defer in that case.

        Generate an error if the name is not defined unless suppress_errors
        is true or the current namespace is incomplete. In the latter case
        defer.
        """
        if '.' not in name:
            # Simple case: look up a short name.
            return self.lookup(name, ctx, suppress_errors=suppress_errors)
        parts = name.split('.')
        namespace = self.cur_mod_id
        sym = self.lookup(parts[0], ctx, suppress_errors=suppress_errors)
        if sym:
            # Walk the remaining components through classes/modules/aliases.
            for i in range(1, len(parts)):
                node = sym.node
                part = parts[i]
                if isinstance(node, TypeInfo):
                    nextsym = node.get(part)
                elif isinstance(node, MypyFile):
                    nextsym = self.get_module_symbol(node, part)
                    namespace = node.fullname
                elif isinstance(node, PlaceholderNode):
                    # Not resolved yet; return the placeholder so the caller
                    # can decide to defer.
                    return sym
                elif isinstance(node, TypeAlias) and node.no_args:
                    assert isinstance(node.target, ProperType)
                    if isinstance(node.target, Instance):
                        nextsym = node.target.type.get(part)
                else:
                    if isinstance(node, Var):
                        typ = get_proper_type(node.type)
                        if isinstance(typ, AnyType):
                            # Allow access through Var with Any type without error.
                            return self.implicit_symbol(sym, name, parts[i:], typ)
                    # Lookup through invalid node, such as variable or function
                    nextsym = None
                if not nextsym or nextsym.module_hidden:
                    if not suppress_errors:
                        self.name_not_defined(name, ctx, namespace=namespace)
                    return None
                sym = nextsym
        return sym
def lookup_type_node(self, expr: Expression) -> Optional[SymbolTableNode]:
try:
t = self.expr_to_unanalyzed_type(expr)
except TypeTranslationError:
return None
if isinstance(t, UnboundType):
n = self.lookup_qualified(t.name, expr, suppress_errors=True)
return n
return None
    def get_module_symbol(self, node: MypyFile, name: str) -> Optional[SymbolTableNode]:
        """Look up a symbol from a module.

        Return None if no matching symbol could be bound.
        """
        module = node.fullname
        names = node.names
        sym = names.get(name)
        if not sym:
            fullname = module + '.' + name
            if fullname in self.modules:
                # The name refers to a submodule.
                sym = SymbolTableNode(GDEF, self.modules[fullname])
            elif self.is_incomplete_namespace(module):
                # The module may still gain this name later; defer.
                self.record_incomplete_ref()
            elif ('__getattr__' in names
                    and (node.is_stub
                         or self.options.python_version >= (3, 7))):
                # Fall back to a module-level __getattr__ (PEP 562).
                gvar = self.create_getattr_var(names['__getattr__'], name, fullname)
                if gvar:
                    sym = SymbolTableNode(GDEF, gvar)
            elif self.is_missing_module(fullname):
                # We use the fullname of the original definition so that we can
                # detect whether two names refer to the same thing.
                var_type = AnyType(TypeOfAny.from_unimported_type)
                v = Var(name, type=var_type)
                v._fullname = fullname
                sym = SymbolTableNode(GDEF, v)
        elif sym.module_hidden:
            # Hidden names (e.g. not re-exported) are treated as unbound.
            sym = None
        return sym
    def is_missing_module(self, module: str) -> bool:
        """Was this module looked for but not found during import processing?"""
        return module in self.missing_modules
def implicit_symbol(self, sym: SymbolTableNode, name: str, parts: List[str],
source_type: AnyType) -> SymbolTableNode:
"""Create symbol for a qualified name reference through Any type."""
if sym.node is None:
basename = None
else:
basename = sym.node.fullname
if basename is None:
fullname = name
else:
fullname = basename + '.' + '.'.join(parts)
var_type = AnyType(TypeOfAny.from_another_any, source_type)
var = Var(parts[-1], var_type)
var._fullname = fullname
return SymbolTableNode(GDEF, var)
    def create_getattr_var(self, getattr_defn: SymbolTableNode,
                           name: str, fullname: str) -> Optional[Var]:
        """Create a dummy variable using module-level __getattr__ return type.

        If not possible, return None.

        Note that multiple Var nodes can be created for a single name. We
        can use the from_module_getattr and the fullname attributes to
        check if two dummy Var nodes refer to the same thing. Reusing Var
        nodes would require non-local mutable state, which we prefer to
        avoid.
        """
        if isinstance(getattr_defn.node, (FuncDef, Var)):
            node_type = get_proper_type(getattr_defn.node.type)
            if isinstance(node_type, CallableType):
                # The dummy variable gets __getattr__'s return type.
                typ = node_type.ret_type
            else:
                typ = AnyType(TypeOfAny.from_error)
            v = Var(name, type=typ)
            v._fullname = fullname
            v.from_module_getattr = True
            return v
        return None
def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
"""Lookup a fully qualified name.
Assume that the name is defined. This happens in the global namespace --
the local module namespace is ignored.
Note that this doesn't support visibility, module-level __getattr__, or
nested classes.
"""
parts = name.split('.')
n = self.modules[parts[0]]
for i in range(1, len(parts) - 1):
next_sym = n.names[parts[i]]
assert isinstance(next_sym.node, MypyFile)
n = next_sym.node
return n.names[parts[-1]]
    def lookup_fully_qualified_or_none(self, fullname: str) -> Optional[SymbolTableNode]:
        """Lookup a fully qualified name that refers to a module-level definition.

        Don't assume that the name is defined. This happens in the global namespace --
        the local module namespace is ignored. This does not dereference indirect
        refs.

        Note that this can't be used for names nested in class namespaces.
        """
        # TODO: unify/clean-up/simplify lookup methods, see #4157.
        # TODO: support nested classes (but consider performance impact,
        #       we might keep the module level only lookup for thing like 'builtins.int').
        assert '.' in fullname
        module, name = fullname.rsplit('.', maxsplit=1)
        if module not in self.modules:
            return None
        filenode = self.modules[module]
        result = filenode.names.get(name)
        if result is None and self.is_incomplete_namespace(module):
            # TODO: More explicit handling of incomplete refs?
            self.record_incomplete_ref()
        return result
def builtin_type(self, fully_qualified_name: str) -> Instance:
sym = self.lookup_fully_qualified(fully_qualified_name)
node = sym.node
assert isinstance(node, TypeInfo)
return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
    def object_type(self) -> Instance:
        """Return an Instance of builtins.object."""
        return self.named_type('__builtins__.object')
    def str_type(self) -> Instance:
        """Return an Instance of builtins.str."""
        return self.named_type('__builtins__.str')
    def named_type(self, qualified_name: str, args: Optional[List[Type]] = None) -> Instance:
        """Construct an Instance of a known named type.

        If args is not given (or empty), fill type arguments with Any.
        The name must resolve; an unknown name is an internal error.
        """
        sym = self.lookup_qualified(qualified_name, Context())
        assert sym, "Internal error: attempted to construct unknown type"
        node = sym.node
        assert isinstance(node, TypeInfo)
        if args:
            # TODO: assert len(args) == len(node.defn.type_vars)
            return Instance(node, args)
        return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
    def named_type_or_none(self, qualified_name: str,
                           args: Optional[List[Type]] = None) -> Optional[Instance]:
        """Like named_type(), but return None if the name can't be resolved.

        Also dereferences no-argument type aliases to their target class.
        """
        sym = self.lookup_fully_qualified_or_none(qualified_name)
        if not sym or isinstance(sym.node, PlaceholderNode):
            return None
        node = sym.node
        if isinstance(node, TypeAlias):
            assert isinstance(node.target, Instance)  # type: ignore
            node = node.target.type
        assert isinstance(node, TypeInfo), node
        if args is not None:
            # TODO: assert len(args) == len(node.defn.type_vars)
            return Instance(node, args)
        return Instance(node, [AnyType(TypeOfAny.unannotated)] * len(node.defn.type_vars))
def lookup_current_scope(self, name: str) -> Optional[SymbolTableNode]:
if self.locals[-1] is not None:
return self.locals[-1].get(name)
elif self.type is not None:
return self.type.names.get(name)
else:
return self.globals.get(name)
#
# Adding symbols
#
    def add_symbol(self,
                   name: str,
                   node: SymbolNode,
                   context: Context,
                   module_public: bool = True,
                   module_hidden: bool = False,
                   can_defer: bool = True,
                   escape_comprehensions: bool = False) -> bool:
        """Add symbol to the currently active symbol table.

        Generally additions to symbol table should go through this method or
        one of the methods below so that kinds, redefinitions, conditional
        definitions, and skipped names are handled consistently.

        Return True if we actually added the symbol, or False if we refused to do so
        (because something is not ready).

        If can_defer is True, defer current target if adding a placeholder.
        """
        # Pick the symbol kind from the current scope.
        if self.is_func_scope():
            kind = LDEF
        elif self.type is not None:
            kind = MDEF
        else:
            kind = GDEF
        symbol = SymbolTableNode(kind,
                                 node,
                                 module_public=module_public,
                                 module_hidden=module_hidden)
        return self.add_symbol_table_node(name, symbol, context, can_defer, escape_comprehensions)
def add_symbol_skip_local(self, name: str, node: SymbolNode) -> None:
"""Same as above, but skipping the local namespace.
This doesn't check for previous definition and is only used
for serialization of method-level classes.
Classes defined within methods can be exposed through an
attribute type, but method-level symbol tables aren't serialized.
This method can be used to add such classes to an enclosing,
serialized symbol table.
"""
# TODO: currently this is only used by named tuples. Use this method
# also by typed dicts and normal classes, see issue #6422.
if self.type is not None:
names = self.type.names
kind = MDEF
else:
names = self.globals
kind = GDEF
symbol = SymbolTableNode(kind, node)
names[name] = symbol
    def add_symbol_table_node(self,
                              name: str,
                              symbol: SymbolTableNode,
                              context: Optional[Context] = None,
                              can_defer: bool = True,
                              escape_comprehensions: bool = False) -> bool:
        """Add symbol table node to the currently active symbol table.

        Return True if we actually added the symbol, or False if we refused
        to do so (because something is not ready or it was a no-op).

        Generate an error if there is an invalid redefinition.

        If context is None, unconditionally add node, since we can't report
        an error. Note that this is used by plugins to forcibly replace nodes!

        TODO: Prevent plugins from replacing nodes, as it could cause problems?

        Args:
            name: short name of symbol
            symbol: Node to add
            can_defer: if True, defer current target if adding a placeholder
            context: error context (see above about None value)
        """
        names = self.current_symbol_table(escape_comprehensions=escape_comprehensions)
        existing = names.get(name)
        if isinstance(symbol.node, PlaceholderNode) and can_defer:
            self.defer(context)
        if (existing is not None
                and context is not None
                and not is_valid_replacement(existing, symbol)):
            # There is an existing node, so this may be a redefinition.
            # If the new node points to the same node as the old one,
            # or if both old and new nodes are placeholders, we don't
            # need to do anything.
            old = existing.node
            new = symbol.node
            if isinstance(new, PlaceholderNode):
                # We don't know whether this is okay. Let's wait until the next iteration.
                return False
            if not is_same_symbol(old, new):
                if isinstance(new, (FuncDef, Decorator, OverloadedFuncDef, TypeInfo)):
                    # Keep the redefinition reachable via a dummy name.
                    self.add_redefinition(names, name, symbol)
                if not (isinstance(new, (FuncDef, Decorator))
                        and self.set_original_def(old, new)):
                    self.name_already_defined(name, context, existing)
        elif (name not in self.missing_names[-1] and '*' not in self.missing_names[-1]):
            # No conflict (or no context for errors): record the symbol.
            names[name] = symbol
            self.progress = True
            return True
        return False
    def add_redefinition(self,
                         names: SymbolTable,
                         name: str,
                         symbol: SymbolTableNode) -> None:
        """Add a symbol table node that reflects a redefinition as a function or a class.

        Redefinitions need to be added to the symbol table so that they can be found
        through AST traversal, but they have dummy names of form 'name-redefinition[N]',
        where N ranges over 2, 3, ... (omitted for the first redefinition).

        Note: we always store redefinitions independently of whether they are valid or not
        (so they will be semantically analyzed), the caller should give an error for invalid
        redefinitions (such as e.g. variable redefined as a class).
        """
        i = 1
        # Don't serialize redefined nodes. They are likely to have
        # busted internal references which can cause problems with
        # serialization and they can't have any external references to
        # them.
        symbol.no_serialize = True
        # Probe dummy names until we find a free one (or the same node).
        while True:
            if i == 1:
                new_name = '{}-redefinition'.format(name)
            else:
                new_name = '{}-redefinition{}'.format(name, i)
            existing = names.get(new_name)
            if existing is None:
                names[new_name] = symbol
                return
            elif existing.node is symbol.node:
                # Already there
                return
            i += 1
    def add_local(self, node: Union[Var, FuncDef, OverloadedFuncDef], context: Context) -> None:
        """Add local variable or function."""
        assert self.is_func_scope()
        # Locals are not qualified: the full name is just the short name.
        name = node.name
        node._fullname = name
        self.add_symbol(name, node, context)
def add_module_symbol(self,
id: str,
as_id: str,
context: Context,
module_public: bool,
module_hidden: bool) -> None:
"""Add symbol that is a reference to a module object."""
if id in self.modules:
node = self.modules[id]
self.add_symbol(as_id, node, context,
module_public=module_public,
module_hidden=module_hidden)
else:
self.add_unknown_imported_symbol(
as_id, context, target_name=id, module_public=module_public,
module_hidden=module_hidden
)
    def add_imported_symbol(self,
                            name: str,
                            node: SymbolTableNode,
                            context: Context,
                            module_public: bool,
                            module_hidden: bool) -> None:
        """Add an alias to an existing symbol through import."""
        # A symbol can't be both hidden and public.
        assert not module_hidden or not module_public
        symbol = SymbolTableNode(node.kind, node.node,
                                 module_public=module_public,
                                 module_hidden=module_hidden)
        self.add_symbol_table_node(name, symbol, context)
    def add_unknown_imported_symbol(self,
                                    name: str,
                                    context: Context,
                                    target_name: Optional[str],
                                    module_public: bool,
                                    module_hidden: bool) -> None:
        """Add symbol that we don't know what it points to because resolving an import failed.

        This can happen if a module is missing, or it is present, but doesn't have
        the imported attribute. The `target_name` is the name of symbol in the namespace
        it is imported from. For example, for 'from mod import x as y' the target_name is
        'mod.x'. This is currently used only to track logical dependencies.
        """
        existing = self.current_symbol_table().get(name)
        if existing and isinstance(existing.node, Var) and existing.node.is_suppressed_import:
            # This missing import was already added -- nothing to do here.
            return
        var = Var(name)
        if self.options.logical_deps and target_name is not None:
            # This makes it possible to add logical fine-grained dependencies
            # from a missing module. We can't use this by default, since in a
            # few places we assume that the full name points to a real
            # definition, but this name may point to nothing.
            var._fullname = target_name
        elif self.type:
            # Qualify the dummy attribute with the enclosing class.
            var._fullname = self.type.fullname + "." + name
            var.info = self.type
        else:
            var._fullname = self.qualified_name(name)
        var.is_ready = True
        any_type = AnyType(TypeOfAny.from_unimported_type, missing_import_name=var._fullname)
        var.type = any_type
        var.is_suppressed_import = True
        self.add_symbol(
            name, var, context, module_public=module_public, module_hidden=module_hidden
        )
#
# Other helpers
#
@contextmanager
def tvar_scope_frame(self, frame: TypeVarLikeScope) -> Iterator[None]:
old_scope = self.tvar_scope
self.tvar_scope = frame
yield
self.tvar_scope = old_scope
    def defer(self, debug_context: Optional[Context] = None) -> None:
        """Defer current analysis target to be analyzed again.

        This must be called if something in the current target is
        incomplete or has a placeholder node. However, this must *not*
        be called during the final analysis iteration! Instead, an error
        should be generated. Often 'process_placeholder' is a good
        way to either defer or generate an error.

        NOTE: Some methods, such as 'anal_type', 'mark_incomplete' and
              'record_incomplete_ref', call this implicitly, or when needed.
              They are usually preferable to a direct defer() call.
        """
        assert not self.final_iteration, 'Must not defer during final iteration'
        self.deferred = True
        # Store debug info for this deferral.
        line = (debug_context.line if debug_context else
                self.statement.line if self.statement else -1)
        self.deferral_debug_context.append((self.cur_mod_id, line))
    def track_incomplete_refs(self) -> Tag:
        """Return tag that can be used for tracking references to incomplete names."""
        # The current counter value acts as the tag; see found_incomplete_ref().
        return self.num_incomplete_refs
    def found_incomplete_ref(self, tag: Tag) -> bool:
        """Have we encountered an incomplete reference since starting tracking?"""
        return self.num_incomplete_refs != tag
    def record_incomplete_ref(self) -> None:
        """Record the encounter of an incomplete reference and defer current analysis target."""
        self.defer()
        self.num_incomplete_refs += 1
    def mark_incomplete(self, name: str, node: Node,
                        becomes_typeinfo: bool = False,
                        module_public: bool = True,
                        module_hidden: bool = False) -> None:
        """Mark a definition as incomplete (and defer current analysis target).

        Also potentially mark the current namespace as incomplete.

        Args:
            name: The name that we weren't able to define (or '*' if the name is unknown)
            node: The node that refers to the name (definition or lvalue)
            becomes_typeinfo: Pass this to PlaceholderNode (used by special forms like
                named tuples that will create TypeInfos).
        """
        self.defer(node)
        if name == '*':
            # Unknown name: the whole namespace is incomplete.
            self.incomplete = True
        elif not self.is_global_or_nonlocal(name):
            # Add a placeholder so lookups find *something* for this name.
            fullname = self.qualified_name(name)
            assert self.statement
            placeholder = PlaceholderNode(fullname, node, self.statement.line,
                                          becomes_typeinfo=becomes_typeinfo)
            self.add_symbol(name, placeholder,
                            module_public=module_public, module_hidden=module_hidden,
                            context=dummy_context())
        self.missing_names[-1].add(name)
    def is_incomplete_namespace(self, fullname: str) -> bool:
        """Is a module or class namespace potentially missing some definitions?

        If a name is missing from an incomplete namespace, we'll need to defer the
        current analysis target.
        """
        return fullname in self.incomplete_namespaces
def process_placeholder(self, name: str, kind: str, ctx: Context) -> None:
"""Process a reference targeting placeholder node.
If this is not a final iteration, defer current node,
otherwise report an error.
The 'kind' argument indicates if this a name or attribute expression
(used for better error message).
"""
if self.final_iteration:
self.cannot_resolve_name(name, kind, ctx)
else:
self.defer(ctx)
    def cannot_resolve_name(self, name: str, kind: str, ctx: Context) -> None:
        """Report a name that couldn't be resolved even on the final iteration."""
        self.fail('Cannot resolve {} "{}" (possible cyclic definition)'.format(kind, name), ctx)
def qualified_name(self, name: str) -> str:
if self.type is not None:
return self.type._fullname + '.' + name
elif self.is_func_scope():
return name
else:
return self.cur_mod_id + '.' + name
    def enter(self, function: Union[FuncItem, GeneratorExpr, DictionaryComprehension]) -> None:
        """Enter a function, generator or comprehension scope.

        Pushes one entry onto each of the per-scope state stacks; leave()
        must pop them all again.
        """
        # Reuse the saved table so repeated analysis iterations see the same names.
        names = self.saved_locals.setdefault(function, SymbolTable())
        self.locals.append(names)
        is_comprehension = isinstance(function, (GeneratorExpr, DictionaryComprehension))
        self.is_comprehension_stack.append(is_comprehension)
        self.global_decls.append(set())
        self.nonlocal_decls.append(set())
        # -1 since entering block will increment this to 0.
        self.block_depth.append(-1)
        self.missing_names.append(set())
def leave(self) -> None:
self.locals.pop()
self.is_comprehension_stack.pop()
self.global_decls.pop()
self.nonlocal_decls.pop()
self.block_depth.pop()
self.missing_names.pop()
    def is_func_scope(self) -> bool:
        # A None entry on the locals stack marks a non-function scope.
        return self.locals[-1] is not None
    def is_nested_within_func_scope(self) -> bool:
        """Are we underneath a function scope, even if we are in a nested class also?"""
        return any(l is not None for l in self.locals)
    def is_class_scope(self) -> bool:
        # Directly inside a class body (not in one of its methods).
        return self.type is not None and not self.is_func_scope()
    def is_module_scope(self) -> bool:
        # At module top level: neither in a class body nor in a function.
        return not (self.is_class_scope() or self.is_func_scope())
def current_symbol_kind(self) -> int:
if self.is_class_scope():
kind = MDEF
elif self.is_func_scope():
kind = LDEF
else:
kind = GDEF
return kind
    def current_symbol_table(self, escape_comprehensions: bool = False) -> SymbolTable:
        """Return the innermost currently active symbol table.

        If escape_comprehensions is true and we are inside a comprehension
        scope, return the table of the nearest enclosing non-comprehension
        scope instead (used e.g. for assignment expressions).
        """
        if self.is_func_scope():
            assert self.locals[-1] is not None
            if escape_comprehensions:
                assert len(self.locals) == len(self.is_comprehension_stack)
                # Retrieve the symbol table from the enclosing non-comprehension scope.
                for i, is_comprehension in enumerate(reversed(self.is_comprehension_stack)):
                    if not is_comprehension:
                        if i == len(self.locals) - 1:  # The last iteration.
                            # The caller of the comprehension is in the global space.
                            names = self.globals
                        else:
                            names_candidate = self.locals[-1 - i]
                            assert names_candidate is not None, \
                                "Escaping comprehension from invalid scope"
                            names = names_candidate
                        break
                else:
                    assert False, "Should have at least one non-comprehension scope"
            else:
                names = self.locals[-1]
                assert names is not None
        elif self.type is not None:
            names = self.type.names
        else:
            names = self.globals
        return names
    def is_global_or_nonlocal(self, name: str) -> bool:
        """Was 'name' declared 'global' or 'nonlocal' in the current function?"""
        return (self.is_func_scope()
                and (name in self.global_decls[-1]
                     or name in self.nonlocal_decls[-1]))
def add_exports(self, exp_or_exps: Union[Iterable[Expression], Expression]) -> None:
exps = [exp_or_exps] if isinstance(exp_or_exps, Expression) else exp_or_exps
for exp in exps:
if isinstance(exp, StrExpr):
self.all_exports.append(exp.value)
def check_no_global(self,
name: str,
ctx: Context,
is_overloaded_func: bool = False) -> None:
if name in self.globals:
prev_is_overloaded = isinstance(self.globals[name], OverloadedFuncDef)
if is_overloaded_func and prev_is_overloaded:
self.fail("Nonconsecutive overload {} found".format(name), ctx)
elif prev_is_overloaded:
self.fail("Definition of '{}' missing 'overload'".format(name), ctx)
else:
self.name_already_defined(name, ctx, self.globals[name])
    def name_not_defined(self, name: str, ctx: Context, namespace: Optional[str] = None) -> None:
        """Report an undefined name, or defer if the namespace is incomplete.

        Also emits helpful notes for missing test-fixture builtins and for
        well-known names the user likely forgot to import.
        """
        incomplete = self.is_incomplete_namespace(namespace or self.cur_mod_id)
        if (namespace is None
                and self.type
                and not self.is_func_scope()
                and self.incomplete_type_stack[-1]
                and not self.final_iteration):
            # We are processing a class body for the first time, so it is incomplete.
            incomplete = True
        if incomplete:
            # Target namespace is incomplete, so it's possible that the name will be defined
            # later on. Defer current target.
            self.record_incomplete_ref()
            return
        message = 'Name "{}" is not defined'.format(name)
        self.fail(message, ctx, code=codes.NAME_DEFINED)
        if 'builtins.{}'.format(name) in SUGGESTED_TEST_FIXTURES:
            # The user probably has a missing definition in a test fixture. Let's verify.
            fullname = 'builtins.{}'.format(name)
            if self.lookup_fully_qualified_or_none(fullname) is None:
                # Yes. Generate a helpful note.
                self.msg.add_fixture_note(fullname, ctx)
        modules_with_unimported_hints = {
            name.split('.', 1)[0]
            for name in TYPES_FOR_UNIMPORTED_HINTS
        }
        lowercased = {
            name.lower(): name
            for name in TYPES_FOR_UNIMPORTED_HINTS
        }
        for module in modules_with_unimported_hints:
            fullname = '{}.{}'.format(module, name).lower()
            if fullname not in lowercased:
                continue
            # User probably forgot to import these types.
            hint = (
                'Did you forget to import it from "{module}"?'
                ' (Suggestion: "from {module} import {name}")'
            ).format(module=module, name=lowercased[fullname].rsplit('.', 1)[-1])
            self.note(hint, ctx, code=codes.NAME_DEFINED)
    def already_defined(self,
                        name: str,
                        ctx: Context,
                        original_ctx: Optional[Union[SymbolTableNode, SymbolNode]],
                        noun: str) -> None:
        """Report a duplicate definition, pointing at the original if possible.

        'noun' is "Name" or "Attribute" depending on the caller.
        """
        if isinstance(original_ctx, SymbolTableNode):
            node: Optional[SymbolNode] = original_ctx.node
        elif isinstance(original_ctx, SymbolNode):
            node = original_ctx
        else:
            node = None
        if isinstance(original_ctx, SymbolTableNode) and isinstance(original_ctx.node, MypyFile):
            # Since this is an import, original_ctx.node points to the module definition.
            # Therefore its line number is always 1, which is not useful for this
            # error message.
            extra_msg = ' (by an import)'
        elif node and node.line != -1 and self.is_local_name(node.fullname):
            # TODO: Using previous symbol node may give wrong line. We should use
            #       the line number where the binding was established instead.
            extra_msg = ' on line {}'.format(node.line)
        else:
            extra_msg = ' (possibly by an import)'
        self.fail('{} "{}" already defined{}'.format(noun, unmangle(name), extra_msg), ctx,
                  code=codes.NO_REDEF)
    def name_already_defined(self,
                             name: str,
                             ctx: Context,
                             original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
                             ) -> None:
        """Report a duplicate module-level or local name definition."""
        self.already_defined(name, ctx, original_ctx, noun='Name')
    def attribute_already_defined(self,
                                  name: str,
                                  ctx: Context,
                                  original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
                                  ) -> None:
        """Report a duplicate class attribute definition."""
        self.already_defined(name, ctx, original_ctx, noun='Attribute')
    def is_local_name(self, name: str) -> bool:
        """Does name look like reference to a definition in the current module?"""
        return self.is_defined_in_current_module(name) or '.' not in name
    def fail(self,
             msg: str,
             ctx: Context,
             serious: bool = False,
             *,
             code: Optional[ErrorCode] = None,
             blocker: bool = False) -> None:
        """Report an error at the given context.

        Non-serious errors are suppressed inside dynamically typed functions
        unless --check-untyped-defs is enabled.
        """
        if (not serious and
                not self.options.check_untyped_defs and
                self.function_stack and
                self.function_stack[-1].is_dynamic()):
            return
        # In case it's a bug and we don't really have context
        assert ctx is not None, msg
        self.errors.report(ctx.get_line(), ctx.get_column(), msg, blocker=blocker, code=code)
def fail_blocker(self, msg: str, ctx: Context) -> None:
self.fail(msg, ctx, blocker=True)
def note(self, msg: str, ctx: Context, code: Optional[ErrorCode] = None) -> None:
if (not self.options.check_untyped_defs and
self.function_stack and
self.function_stack[-1].is_dynamic()):
return
self.errors.report(ctx.get_line(), ctx.get_column(), msg, severity='note', code=code)
def accept(self, node: Node) -> None:
try:
node.accept(self)
except Exception as err:
report_internal_error(err, self.errors.file, node.line, self.errors, self.options)
    def expr_to_analyzed_type(self,
                              expr: Expression,
                              report_invalid_types: bool = True,
                              allow_placeholder: bool = False) -> Optional[Type]:
        """Translate an expression to the analyzed type it denotes.

        Returns None (after deferring) if the expression is a namedtuple call
        whose definition is not complete yet.  Raises TypeTranslationError for
        call expressions that are not some form of namedtuple.
        """
        if isinstance(expr, CallExpr):
            # Analyze the call so check_namedtuple() sees resolved names.
            expr.accept(self)
            internal_name, info = self.named_tuple_analyzer.check_namedtuple(expr, None,
                                                                             self.is_func_scope())
            if internal_name is None:
                # Some form of namedtuple is the only valid type that looks like a call
                # expression. This isn't a valid type.
                raise TypeTranslationError()
            elif not info:
                self.defer(expr)
                return None
            assert info.tuple_type, "NamedTuple without tuple type"
            fallback = Instance(info, [])
            return TupleType(info.tuple_type.items, fallback=fallback)
        typ = self.expr_to_unanalyzed_type(expr)
        return self.anal_type(typ, report_invalid_types=report_invalid_types,
                              allow_placeholder=allow_placeholder)
def analyze_type_expr(self, expr: Expression) -> None:
# There are certain expressions that mypy does not need to semantically analyze,
# since they analyzed solely as type. (For example, indexes in type alias definitions
# and base classes in class defs). External consumers of the mypy AST may need
# them semantically analyzed, however, if they need to treat it as an expression
# and not a type. (Which is to say, mypyc needs to do this.) Do the analysis
# in a fresh tvar scope in order to suppress any errors about using type variables.
with self.tvar_scope_frame(TypeVarLikeScope()):
expr.accept(self)
    def type_analyzer(self, *,
                      tvar_scope: Optional[TypeVarLikeScope] = None,
                      allow_tuple_literal: bool = False,
                      allow_unbound_tvars: bool = False,
                      allow_placeholder: bool = False,
                      report_invalid_types: bool = True) -> TypeAnalyser:
        """Create a TypeAnalyser wired up to this semantic analyzer.

        If tvar_scope is not given, the current type variable scope is used;
        the allow_*/report_* flags are forwarded to the analyzer.
        """
        if tvar_scope is None:
            tvar_scope = self.tvar_scope
        tpan = TypeAnalyser(self,
                            tvar_scope,
                            self.plugin,
                            self.options,
                            self.is_typeshed_stub_file,
                            allow_unbound_tvars=allow_unbound_tvars,
                            allow_tuple_literal=allow_tuple_literal,
                            report_invalid_types=report_invalid_types,
                            allow_new_syntax=self.is_stub_file,
                            allow_placeholder=allow_placeholder)
        # The analyzer treats dynamic functions and global scope differently.
        tpan.in_dynamic_func = bool(self.function_stack and self.function_stack[-1].is_dynamic())
        tpan.global_scope = not self.type and not self.function_stack
        return tpan
    def expr_to_unanalyzed_type(self, node: Expression) -> ProperType:
        """Translate an expression to the unanalyzed type it syntactically denotes."""
        return expr_to_unanalyzed_type(node, self.options, self.is_stub_file)
    def anal_type(self,
                  typ: Type, *,
                  tvar_scope: Optional[TypeVarLikeScope] = None,
                  allow_tuple_literal: bool = False,
                  allow_unbound_tvars: bool = False,
                  allow_placeholder: bool = False,
                  report_invalid_types: bool = True,
                  third_pass: bool = False) -> Optional[Type]:
        """Semantically analyze a type.

        Args:
            typ: Type to analyze (if already analyzed, this is a no-op)
            allow_placeholder: If True, may return PlaceholderType if
                encountering an incomplete definition
            third_pass: Unused; only for compatibility with old semantic
                analyzer

        Return None only if some part of the type couldn't be bound *and* it
        referred to an incomplete namespace or definition. In this case also
        defer as needed. During a final iteration this won't return None;
        instead report an error if the type can't be analyzed and return
        AnyType.

        In case of other errors, report an error message and return AnyType.

        NOTE: The caller shouldn't defer even if this returns None or a
              placeholder type.
        """
        a = self.type_analyzer(tvar_scope=tvar_scope,
                               allow_unbound_tvars=allow_unbound_tvars,
                               allow_tuple_literal=allow_tuple_literal,
                               allow_placeholder=allow_placeholder,
                               report_invalid_types=report_invalid_types)
        tag = self.track_incomplete_refs()
        typ = typ.accept(a)
        if self.found_incomplete_ref(tag):
            # Something could not be bound yet.
            return None
        # Record used aliases so fine-grained mode can re-trigger this target.
        self.add_type_alias_deps(a.aliases_used)
        return typ
    def class_type(self, self_type: Type) -> Type:
        """Return the type object type (Type[...]) for the given instance type."""
        return TypeType.make_normalized(self_type)
    def schedule_patch(self, priority: int, patch: Callable[[], None]) -> None:
        """Queue a callback to run after semantic analysis, ordered by priority."""
        self.patches.append((priority, patch))
    def report_hang(self) -> None:
        """Report that semantic analysis did not converge (an internal error)."""
        print('Deferral trace:')
        for mod, line in self.deferral_debug_context:
            print('    {}:{}'.format(mod, line))
        self.errors.report(-1, -1,
                           'INTERNAL ERROR: maximum semantic analysis iteration count reached',
                           blocker=True)
def add_plugin_dependency(self, trigger: str, target: Optional[str] = None) -> None:
"""Add dependency from trigger to a target.
If the target is not given explicitly, use the current target.
"""
if target is None:
target = self.scope.current_target()
self.cur_mod_node.plugin_deps.setdefault(trigger, set()).add(target)
def add_type_alias_deps(self,
aliases_used: Iterable[str],
target: Optional[str] = None) -> None:
"""Add full names of type aliases on which the current node depends.
This is used by fine-grained incremental mode to re-check the corresponding nodes.
If `target` is None, then the target node used will be the current scope.
"""
if not aliases_used:
# A basic optimization to avoid adding targets with no dependencies to
# the `alias_deps` dict.
return
if target is None:
target = self.scope.current_target()
self.cur_mod_node.alias_deps[target].update(aliases_used)
def is_mangled_global(self, name: str) -> bool:
# A global is mangled if there exists at least one renamed variant.
return unmangle(name) + "'" in self.globals
def is_initial_mangled_global(self, name: str) -> bool:
# If there are renamed definitions for a global, the first one has exactly one prime.
return name == unmangle(name) + "'"
def parse_bool(self, expr: Expression) -> Optional[bool]:
if isinstance(expr, NameExpr):
if expr.fullname == 'builtins.True':
return True
if expr.fullname == 'builtins.False':
return False
return None
def set_future_import_flags(self, module_name: str) -> None:
if module_name in FUTURE_IMPORTS:
self.future_import_flags.add(FUTURE_IMPORTS[module_name])
    def is_future_flag_set(self, flag: str) -> bool:
        """Is the given __future__ feature flag enabled in the current module?"""
        return flag in self.future_import_flags
class HasPlaceholders(TypeQuery[bool]):
    """Type query that is True if a type contains a PlaceholderType anywhere."""
    def __init__(self) -> None:
        # `any` combines component results: a single placeholder suffices.
        super().__init__(any)
    def visit_placeholder_type(self, t: PlaceholderType) -> bool:
        return True
def has_placeholder(typ: Type) -> bool:
    """Check if a type contains any placeholder types (recursively)."""
    query = HasPlaceholders()
    return typ.accept(query)
def replace_implicit_first_type(sig: FunctionLike, new: Type) -> FunctionLike:
    """Return a copy of `sig` with its first argument type replaced by `new`."""
    if isinstance(sig, CallableType):
        if not sig.arg_types:
            return sig
        updated_arg_types = [new] + sig.arg_types[1:]
        return sig.copy_modified(arg_types=updated_arg_types)
    elif isinstance(sig, Overloaded):
        # Apply the replacement to every overload item.
        new_items = [cast(CallableType, replace_implicit_first_type(item, new))
                     for item in sig.items()]
        return Overloaded(new_items)
    else:
        assert False
def refers_to_fullname(node: Expression, fullname: str) -> bool:
    """Is node a name or member expression with the given full name?"""
    if not isinstance(node, RefExpr):
        return False
    if node.fullname == fullname:
        return True
    # An alias whose target is an Instance of the requested type also counts.
    alias = node.node
    if not isinstance(alias, TypeAlias):
        return False
    target = get_proper_type(alias.target)
    return isinstance(target, Instance) and target.type.fullname == fullname
def refers_to_class_or_function(node: Expression) -> bool:
    """Does semantically analyzed node refer to a class (or function)?"""
    if not isinstance(node, RefExpr):
        return False
    return isinstance(node.node, (TypeInfo, FuncDef, OverloadedFuncDef))
def find_duplicate(list: List[T]) -> Optional[T]:
    """If the list has duplicates, return one of the duplicates.

    Otherwise, return None.
    """
    # Scan left to right, remembering already-seen items; uses == only,
    # so items don't have to be hashable.
    seen: List[T] = []
    for item in list:
        if item in seen:
            return item
        seen.append(item)
    return None
def remove_imported_names_from_symtable(names: SymbolTable,
                                        module: str) -> None:
    """Remove all imported names from the symbol table of a module."""
    # Collect names whose fullname prefix doesn't match the module,
    # then delete them in a second pass (can't mutate while iterating).
    to_remove: List[str] = []
    for name, node in names.items():
        if node.node is None:
            continue
        fullname = node.node.fullname
        defining_module = fullname[:fullname.rfind('.')]
        if defining_module != module:
            to_remove.append(name)
    for name in to_remove:
        del names[name]
def make_any_non_explicit(t: Type) -> Type:
    """Replace every explicit Any inside `t` with an Any whose 'explicit' flag is off."""
    translator = MakeAnyNonExplicit()
    return t.accept(translator)
class MakeAnyNonExplicit(TypeTranslator):
    """Type translator that downgrades explicit Any types to special-form Any."""
    def visit_any(self, t: AnyType) -> Type:
        if t.type_of_any == TypeOfAny.explicit:
            return t.copy_modified(TypeOfAny.special_form)
        return t
    def visit_type_alias_type(self, t: TypeAliasType) -> Type:
        # Recurse into alias arguments (the alias target is expanded lazily).
        return t.copy_modified(args=[a.accept(self) for a in t.args])
def apply_semantic_analyzer_patches(patches: List[Tuple[int, Callable[[], None]]]) -> None:
    """Call patch callbacks in the right order.

    This should happen after semantic analyzer pass 3.
    """
    # Stable sort on priority only, so equal-priority patches keep
    # their scheduling order.
    for _priority, patch_func in sorted(patches, key=lambda entry: entry[0]):
        patch_func()
def names_modified_by_assignment(s: AssignmentStmt) -> List[NameExpr]:
    """Return all unqualified (short) names assigned to in an assignment statement."""
    names: List[NameExpr] = []
    for lvalue in s.lvalues:
        names.extend(names_modified_in_lvalue(lvalue))
    return names
def names_modified_in_lvalue(lvalue: Lvalue) -> List[NameExpr]:
    """Return all NameExpr assignment targets in an Lvalue."""
    if isinstance(lvalue, NameExpr):
        return [lvalue]
    if isinstance(lvalue, StarExpr):
        # `*target` wraps a single inner lvalue.
        return names_modified_in_lvalue(lvalue.expr)
    if isinstance(lvalue, (ListExpr, TupleExpr)):
        # Sequence unpacking: recurse into each item.
        collected: List[NameExpr] = []
        for item in lvalue.items:
            collected.extend(names_modified_in_lvalue(item))
        return collected
    return []
def is_same_var_from_getattr(n1: Optional[SymbolNode], n2: Optional[SymbolNode]) -> bool:
    """Do n1 and n2 refer to the same Var derived from module-level __getattr__?"""
    if not (isinstance(n1, Var) and n1.from_module_getattr):
        return False
    if not (isinstance(n2, Var) and n2.from_module_getattr):
        return False
    return n1.fullname == n2.fullname
def dummy_context() -> Context:
    """Return a placeholder context for diagnostics that have no real location."""
    return TempNode(AnyType(TypeOfAny.special_form))
def is_valid_replacement(old: SymbolTableNode, new: SymbolTableNode) -> bool:
    """Can symbol table node replace an existing one?

    These are the only valid cases:

    1. Placeholder gets replaced with a non-placeholder
    2. Placeholder that isn't known to become type replaced with a
       placeholder that can become a type
    """
    if not isinstance(old.node, PlaceholderNode):
        # A real definition is never replaced.
        return False
    if not isinstance(new.node, PlaceholderNode):
        # Case 1: placeholder -> real definition.
        return True
    # Case 2: placeholder -> placeholder that can become a type.
    return new.node.becomes_typeinfo and not old.node.becomes_typeinfo
def is_same_symbol(a: Optional[SymbolNode], b: Optional[SymbolNode]) -> bool:
    """Are two symbol nodes equivalent for redefinition purposes?"""
    if a == b:
        return True
    # Any two placeholders are interchangeable.
    if isinstance(a, PlaceholderNode) and isinstance(b, PlaceholderNode):
        return True
    return is_same_var_from_getattr(a, b)
| 45.269862 | 99 | 0.590956 |
from contextlib import contextmanager
from typing import (
List, Dict, Set, Tuple, cast, TypeVar, Union, Optional, Callable, Iterator, Iterable
)
from typing_extensions import Final
from mypy.nodes import (
MypyFile, TypeInfo, Node, AssignmentStmt, FuncDef, OverloadedFuncDef,
ClassDef, Var, GDEF, FuncItem, Import, Expression, Lvalue,
ImportFrom, ImportAll, Block, LDEF, NameExpr, MemberExpr,
IndexExpr, TupleExpr, ListExpr, ExpressionStmt, ReturnStmt,
RaiseStmt, AssertStmt, OperatorAssignmentStmt, WhileStmt,
ForStmt, BreakStmt, ContinueStmt, IfStmt, TryStmt, WithStmt, DelStmt,
GlobalDecl, SuperExpr, DictExpr, CallExpr, RefExpr, OpExpr, UnaryExpr,
SliceExpr, CastExpr, RevealExpr, TypeApplication, Context, SymbolTable,
SymbolTableNode, ListComprehension, GeneratorExpr,
LambdaExpr, MDEF, Decorator, SetExpr, TypeVarExpr,
StrExpr, BytesExpr, PrintStmt, ConditionalExpr, PromoteExpr,
ComparisonExpr, StarExpr, ArgKind, ARG_POS, ARG_NAMED, type_aliases,
YieldFromExpr, NamedTupleExpr, NonlocalDecl, SymbolNode,
SetComprehension, DictionaryComprehension, TypeAlias, TypeAliasExpr,
YieldExpr, ExecStmt, BackquoteExpr, ImportBase, AwaitExpr,
IntExpr, FloatExpr, UnicodeExpr, TempNode, OverloadPart,
PlaceholderNode, COVARIANT, CONTRAVARIANT, INVARIANT,
get_nongen_builtins, get_member_expr_fullname, REVEAL_TYPE,
REVEAL_LOCALS, is_final_node, TypedDictExpr, type_aliases_source_versions,
EnumCallExpr, RUNTIME_PROTOCOL_DECOS, FakeExpression, Statement, AssignmentExpr,
ParamSpecExpr, EllipsisExpr
)
from mypy.tvar_scope import TypeVarLikeScope
from mypy.typevars import fill_typevars
from mypy.visitor import NodeVisitor
from mypy.errors import Errors, report_internal_error
from mypy.messages import (
best_matches, MessageBuilder, pretty_seq, SUGGESTED_TEST_FIXTURES, TYPES_FOR_UNIMPORTED_HINTS
)
from mypy.errorcodes import ErrorCode
from mypy import message_registry, errorcodes as codes
from mypy.types import (
FunctionLike, UnboundType, TypeVarDef, TupleType, UnionType, StarType,
CallableType, Overloaded, Instance, Type, AnyType, LiteralType, LiteralValue,
TypeTranslator, TypeOfAny, TypeType, NoneType, PlaceholderType, TPDICT_NAMES, ProperType,
get_proper_type, get_proper_types, TypeAliasType
)
from mypy.typeops import function_type
from mypy.type_visitor import TypeQuery
from mypy.nodes import implicit_module_attrs
from mypy.typeanal import (
TypeAnalyser, analyze_type_alias, no_subscript_builtin_alias,
TypeVarLikeQuery, TypeVarLikeList, remove_dups, has_any_from_unimported_type,
check_for_explicit_any, type_constructors, fix_instance_types
)
from mypy.exprtotype import expr_to_unanalyzed_type, TypeTranslationError
from mypy.options import Options
from mypy.plugin import (
Plugin, ClassDefContext, SemanticAnalyzerPluginInterface,
DynamicClassDefContext
)
from mypy.util import (
correct_relative_import, unmangle, module_prefix, is_typeshed_file, unnamed_function,
)
from mypy.scope import Scope
from mypy.semanal_shared import (
SemanticAnalyzerInterface, set_callable_name, calculate_tuple_fallback, PRIORITY_FALLBACKS
)
from mypy.semanal_namedtuple import NamedTupleAnalyzer
from mypy.semanal_typeddict import TypedDictAnalyzer
from mypy.semanal_enum import EnumCallAnalyzer
from mypy.semanal_newtype import NewTypeAnalyzer
from mypy.reachability import (
infer_reachability_of_if_statement, infer_condition_value, ALWAYS_FALSE, ALWAYS_TRUE,
MYPY_TRUE, MYPY_FALSE
)
from mypy.mro import calculate_mro, MroError
# Generic type variable used by helper functions in this module.
T = TypeVar('T')
# Map from a `__future__` import's full name to its feature flag name.
FUTURE_IMPORTS: Final = {
    '__future__.nested_scopes': 'nested_scopes',
    '__future__.generators': 'generators',
    '__future__.division': 'division',
    '__future__.absolute_import': 'absolute_import',
    '__future__.with_statement': 'with_statement',
    '__future__.print_function': 'print_function',
    '__future__.unicode_literals': 'unicode_literals',
    '__future__.barry_as_FLUFL': 'barry_as_FLUFL',
    '__future__.generator_stop': 'generator_stop',
    '__future__.annotations': 'annotations',
}
# Special cased built-in classes that are needed for basic functionality and
# need to be available very early on (see prepare_builtins_namespace()).
CORE_BUILTIN_CLASSES: Final = ["object", "bool", "function"]
# Token used for tracking incomplete references
# (see track_incomplete_refs()/found_incomplete_ref()).
Tag = int
class SemanticAnalyzer(NodeVisitor[None],
                       SemanticAnalyzerInterface,
                       SemanticAnalyzerPluginInterface):
    """Semantically analyze parsed mypy files.

    A single analyzer instance is reused across modules.  A target may need
    several analysis iterations: references that can't be resolved yet are
    deferred and retried on a later iteration.
    """
    __deletable__ = ['patches', 'options', 'cur_mod_node']
    # Module name space
    modules: Dict[str, MypyFile]
    # Global name space for current module (set in file_context())
    globals: SymbolTable
    # Names declared using "global" (separate set for each scope)
    global_decls: List[Set[str]]
    # Names declared using "nonlocal" (separate set for each scope)
    nonlocal_decls: List[Set[str]]
    # Local names of function scopes; None for non-function scopes
    locals: List[Optional[SymbolTable]]
    # Whether each scope is a comprehension scope
    is_comprehension_stack: List[bool]
    # Nested block depths of scopes
    block_depth: List[int]
    # TypeInfo of directly enclosing class (or None)
    type: Optional[TypeInfo] = None
    # Stack of outer classes
    type_stack: List[Optional[TypeInfo]]
    # Type variables bound by the current scope, be it class or function
    tvar_scope: TypeVarLikeScope
    # Per-module options (set in file_context())
    options: Options
    # Stack of functions being analyzed
    function_stack: List[FuncItem]
    progress = False  # Set to True if some iteration defined a new name
    deferred = False  # Set to True if another analysis pass is needed
    incomplete = False  # Set to True if current module namespace is missing things
    # Is this the final analysis iteration (no more deferrals allowed)?
    _final_iteration = False
    # Note that missing names are per module, _not_ per namespace. This means that e.g.
    # a missing name at global scope will block adding same name at a class scope.
    # This should not affect correctness and is purely a performance issue,
    # since it can cause unnecessary deferrals. These are represented as
    # PlaceholderNodes in the symbol table. We use this to ensure that the first
    # definition takes precedence even if it's incomplete.
    missing_names: List[Set[str]]
    # Callbacks that will be called after semantic analysis to tweak things
    patches: List[Tuple[int, Callable[[], None]]]
    loop_depth = 0  # Depth of nested loops
    cur_mod_id = ''  # Current module id (set in file_context())
    _is_stub_file = False  # Are we analyzing a stub (.pyi) file?
    _is_typeshed_stub_file = False  # Are we analyzing a typeshed stub file?
    imports: Set[str]  # Imported modules
    errors: Errors  # Keeps track of generated errors
    plugin: Plugin  # Mypy plugin for special casing of library features
    statement: Optional[Statement] = None  # Statement/definition being analyzed
    future_import_flags: Set[str]  # Active `from __future__ import ...` feature flags
    # Mapping from 'async def' function definitions to their wrapped
    # 'Coroutine[...]' return type. Used to keep track of whether a function's
    # return type has already been wrapped, by checking if the function definition's
    # current type is stored here.
    wrapped_coro_return_types: Dict[FuncDef, Type] = {}
    def __init__(self,
                 modules: Dict[str, MypyFile],
                 missing_modules: Set[str],
                 incomplete_namespaces: Set[str],
                 errors: Errors,
                 plugin: Plugin) -> None:
        """Construct semantic analyzer.

        Args:
            modules: Global modules dictionary
            missing_modules: Modules that could not be imported
            incomplete_namespaces: Namespaces that are still being populated
                (mutated by the caller between iterations)
            errors: Report analysis errors using this instance
            plugin: Active mypy plugin
        """
        self.locals = [None]
        self.is_comprehension_stack = [False]
        # Local namespaces preserved across iterations of function/comprehension
        # analysis, keyed by the scope-introducing node.
        self.saved_locals: Dict[
            Union[FuncItem, GeneratorExpr, DictionaryComprehension], SymbolTable
        ] = {}
        self.imports = set()
        self.type = None
        self.type_stack = []
        self.incomplete_type_stack: List[bool] = []
        self.tvar_scope = TypeVarLikeScope()
        self.function_stack = []
        self.block_depth = [0]
        self.loop_depth = 0
        self.errors = errors
        self.modules = modules
        self.msg = MessageBuilder(errors, modules)
        self.missing_modules = missing_modules
        self.missing_names = [set()]
        # These namespaces are still in the process of being populated. A missing
        # name in one of these causes the current target to be deferred.
        self.incomplete_namespaces = incomplete_namespaces
        self.all_exports: List[str] = []
        # Map from module id to list of explicitly exported names (i.e. names in __all__).
        self.export_map: Dict[str, List[str]] = {}
        self.plugin = plugin
        # If True, process function definitions. If False, don't. This is used
        # for processing module top levels in fine-grained incremental mode
        # (see refresh_top_level()).
        self.recurse_into_functions = True
        self.scope = Scope()
        # Trace of (module, line) pairs for each deferral, used by report_hang().
        self.deferral_debug_context: List[Tuple[str, int]] = []
        self.future_import_flags: Set[str] = set()
    # NOTE(review): the implemented interfaces appear to declare these as
    # read-only properties, which can't be satisfied
    # with a regular attribute so we make them properties.
    @property
    def is_stub_file(self) -> bool:
        """Whether the current file is a stub (.pyi) file (set in file_context())."""
        return self._is_stub_file
    @property
    def is_typeshed_stub_file(self) -> bool:
        """Whether the current file is a typeshed stub (set in file_context())."""
        return self._is_typeshed_stub_file
    @property
    def final_iteration(self) -> bool:
        """True on the last analysis iteration, when deferral is no longer allowed."""
        return self._final_iteration
#
# Preparing module (performed before semantic analysis)
#
def prepare_file(self, file_node: MypyFile) -> None:
if 'builtins' in self.modules:
file_node.names['__builtins__'] = SymbolTableNode(GDEF,
self.modules['builtins'])
if file_node.fullname == 'builtins':
self.prepare_builtins_namespace(file_node)
if file_node.fullname == 'typing':
self.prepare_typing_namespace(file_node)
    def prepare_typing_namespace(self, file_node: MypyFile) -> None:
        """Remove dummy alias definitions such as List = TypeAlias(object) from typing.

        They will be replaced with real aliases when corresponding targets are ready.
        """
        # This is all pretty unfortunate. typeshed now has a
        # sys.version_info check for OrderedDict, and we shouldn't
        # assume the dummy aliases appear only at top level -- hence the
        # recursion into if-statement bodies below.
        def helper(defs: List[Statement]) -> None:
            # Iterate over a copy, since defs.remove() mutates the list.
            for stmt in defs.copy():
                if isinstance(stmt, IfStmt):
                    for body in stmt.body:
                        helper(body.body)
                    if stmt.else_body:
                        helper(stmt.else_body.body)
                if (isinstance(stmt, AssignmentStmt) and len(stmt.lvalues) == 1 and
                        isinstance(stmt.lvalues[0], NameExpr)):
                    # Assignment to a simple name: drop it if it is a dummy alias.
                    if 'typing.' + stmt.lvalues[0].name in type_aliases:
                        defs.remove(stmt)
        helper(file_node.defs)
    def prepare_builtins_namespace(self, file_node: MypyFile) -> None:
        """Add certain special-cased definitions to the builtins module.

        Some definitions are too special or fundamental to be processed
        normally from the AST.
        """
        names = file_node.names
        # Add the core classes first, since the special vars below need 'bool'.
        for name in CORE_BUILTIN_CLASSES:
            cdef = ClassDef(name, Block([]))  # Dummy ClassDef, will be replaced later
            info = TypeInfo(SymbolTable(), cdef, 'builtins')
            info._fullname = 'builtins.%s' % name
            names[name] = SymbolTableNode(GDEF, info)
        bool_info = names['bool'].node
        assert isinstance(bool_info, TypeInfo)
        bool_type = Instance(bool_info, [])
        special_var_types: List[Tuple[str, Type]] = [
            ('None', NoneType()),
            # reveal_type is a mypy-only function that gives an error with
            # the type of its arg.
            ('reveal_type', AnyType(TypeOfAny.special_form)),
            # reveal_locals is a mypy-only function that gives an error with
            # the types of locals.
            ('reveal_locals', AnyType(TypeOfAny.special_form)),
            ('True', bool_type),
            ('False', bool_type),
            ('__debug__', bool_type),
        ]
        for name, typ in special_var_types:
            v = Var(name, typ)
            v._fullname = 'builtins.%s' % name
            file_node.names[name] = SymbolTableNode(GDEF, v)
    def refresh_partial(self,
                        node: Union[MypyFile, FuncDef, OverloadedFuncDef],
                        patches: List[Tuple[int, Callable[[], None]]],
                        final_iteration: bool,
                        file_node: MypyFile,
                        options: Options,
                        active_type: Optional[TypeInfo] = None) -> None:
        """Refresh a stale target (module top level or function) in fine-grained mode."""
        self.patches = patches
        self.deferred = False
        self.incomplete = False
        self._final_iteration = final_iteration
        self.missing_names[-1] = set()
        with self.file_context(file_node, options, active_type):
            if isinstance(node, MypyFile):
                self.refresh_top_level(node)
            else:
                self.recurse_into_functions = True
                self.accept(node)
        del self.patches
    def refresh_top_level(self, file_node: MypyFile) -> None:
        """Reanalyze a stale module top level in fine-grained incremental mode."""
        # Don't descend into function bodies here; they are separate targets.
        self.recurse_into_functions = False
        self.add_implicit_module_attrs(file_node)
        for d in file_node.defs:
            self.accept(d)
        if file_node.fullname == 'typing':
            self.add_builtin_aliases(file_node)
        self.adjust_public_exports()
        self.export_map[self.cur_mod_id] = self.all_exports
        self.all_exports = []
    def add_implicit_module_attrs(self, file_node: MypyFile) -> None:
        """Manually add implicit definitions of module '__name__' etc."""
        for name, t in implicit_module_attrs.items():
            if name == '__doc__':
                if self.options.python_version >= (3, 0):
                    typ: Type = UnboundType("__builtins__.str")
                else:
                    # Unicode docstrings should be accepted in Python 2.
                    typ = UnionType([UnboundType('__builtins__.str'),
                                     UnboundType('__builtins__.unicode')])
            else:
                assert t is not None, 'type should be specified for {}'.format(name)
                typ = UnboundType(t)
            existing = file_node.names.get(name)
            if existing is not None and not isinstance(existing.node, PlaceholderNode):
                # Already exists.
                continue
            an_type = self.anal_type(typ)
            if an_type:
                var = Var(name, an_type)
                var._fullname = self.qualified_name(name)
                var.is_ready = True
                self.add_symbol(name, var, dummy_context())
            else:
                # The builtins may not be fully processed yet; leave a
                # placeholder so this is retried on a later iteration.
                self.add_symbol(name,
                                PlaceholderNode(self.qualified_name(name), file_node, -1),
                                dummy_context())
    def add_builtin_aliases(self, tree: MypyFile) -> None:
        """Add builtin type aliases to the typing module.

        For historical reasons, aliases like `List = list` are not defined
        in typeshed stubs for the typing module, so the corresponding nodes
        are added on the fly here. The aliases are explicitly marked as
        normalized, so that a user can write e.g. `typing.List[int]`.
        """
        assert tree.fullname == 'typing'
        for alias, target_name in type_aliases.items():
            if type_aliases_source_versions[alias] > self.options.python_version:
                # This alias requires a newer Python version than the target one.
                continue
            name = alias.split('.')[-1]
            if name in tree.names and not isinstance(tree.names[name].node, PlaceholderNode):
                continue
            tag = self.track_incomplete_refs()
            n = self.lookup_fully_qualified_or_none(target_name)
            if n:
                if isinstance(n.node, PlaceholderNode):
                    self.mark_incomplete(name, tree)
                else:
                    # Found the built-in target class; create the alias.
                    target = self.named_type_or_none(target_name, [])
                    assert target is not None
                    fix_instance_types(target, self.fail, self.note, self.options.python_version)
                    alias_node = TypeAlias(target, alias,
                                           line=-1, column=-1,  # there is no context
                                           no_args=True, normalized=True)
                    self.add_symbol(name, alias_node, tree)
            elif self.found_incomplete_ref(tag):
                # The target may not be ready yet -- defer.
                self.mark_incomplete(name, tree)
            else:
                # Test fixtures may be missing some builtin classes, which is okay.
                # Kill the placeholder if there is one.
                if name in tree.names:
                    assert isinstance(tree.names[name].node, PlaceholderNode)
                    del tree.names[name]
def adjust_public_exports(self) -> None:
if '__all__' in self.globals:
for name, g in self.globals.items():
if name in self.all_exports:
g.module_public = True
g.module_hidden = False
else:
g.module_public = False
    @contextmanager
    def file_context(self,
                     file_node: MypyFile,
                     options: Options,
                     active_type: Optional[TypeInfo] = None) -> Iterator[None]:
        """Configure the analyzer for analyzing targets within a file/class.

        Args:
            file_node: target file
            options: options specific to the file
            active_type: must be the surrounding class to analyze method targets
        """
        scope = self.scope
        self.options = options
        self.errors.set_file(file_node.path, file_node.fullname, scope=scope)
        self.cur_mod_node = file_node
        self.cur_mod_id = file_node.fullname
        scope.enter_file(self.cur_mod_id)
        self._is_stub_file = file_node.path.lower().endswith('.pyi')
        self._is_typeshed_stub_file = is_typeshed_file(file_node.path)
        self.globals = file_node.names
        self.tvar_scope = TypeVarLikeScope()
        self.named_tuple_analyzer = NamedTupleAnalyzer(options, self)
        self.typed_dict_analyzer = TypedDictAnalyzer(options, self, self.msg)
        self.enum_call_analyzer = EnumCallAnalyzer(options, self)
        self.newtype_analyzer = NewTypeAnalyzer(options, self, self.msg)
        self.num_incomplete_refs = 0
        if active_type:
            self.incomplete_type_stack.append(False)
            scope.enter_class(active_type)
            self.enter_class(active_type.defn.info)
            # Re-bind the class's type variables so methods can use them.
            for tvar in active_type.defn.type_vars:
                self.tvar_scope.bind_existing(tvar)
        yield
        # NOTE(review): this cleanup is not in a try/finally, so an exception
        # raised during analysis skips it -- presumably handled at a higher
        # level; confirm before relying on re-entry after errors.
        if active_type:
            scope.leave()
            self.leave_class()
            self.type = None
            self.incomplete_type_stack.pop()
        scope.leave()
        del self.options
    def visit_func_def(self, defn: FuncDef) -> None:
        """Semantic analysis entry point for a function definition."""
        self.statement = defn
        # Visit default values because they may contain assignment expressions.
        for arg in defn.arguments:
            if arg.initializer:
                arg.initializer.accept(self)
        defn.is_conditional = self.block_depth[-1] > 0
        # Set full names even for those definitions that aren't added
        # to a symbol table. For example, for overload items.
        defn._fullname = self.qualified_name(defn.name)
        # We don't add module top-level functions to symbol tables
        # here when doing a top-level pass (they are added separately);
        # nested functions are always added when visited.
        if not self.recurse_into_functions or len(self.function_stack) > 0:
            if not defn.is_decorated and not defn.is_overload:
                self.add_function_to_symbol_table(defn)
        if not self.recurse_into_functions:
            return
        with self.scope.function_scope(defn):
            self.analyze_func_def(defn)
    def analyze_func_def(self, defn: FuncDef) -> None:
        """Analyze a function definition: signature, self/cls handling and body."""
        self.function_stack.append(defn)
        if defn.type:
            assert isinstance(defn.type, CallableType)
            self.update_function_type_variables(defn.type, defn)
        self.function_stack.pop()
        if self.is_class_scope():
            # Method definition
            assert self.type is not None
            defn.info = self.type
            if defn.type is not None and defn.name in ('__init__', '__init_subclass__'):
                assert isinstance(defn.type, CallableType)
                if isinstance(get_proper_type(defn.type.ret_type), AnyType):
                    # A declared Any return on __init__/__init_subclass__ is
                    # interpreted as None.
                    defn.type = defn.type.copy_modified(ret_type=NoneType())
            self.prepare_method_signature(defn, self.type)
        # Analyze function signature
        with self.tvar_scope_frame(self.tvar_scope.method_frame()):
            if defn.type:
                self.check_classvar_in_signature(defn.type)
                assert isinstance(defn.type, CallableType)
                # Signature must be analyzed in the surrounding scope so that
                # class-level imported names and type variables are in scope.
                analyzer = self.type_analyzer()
                tag = self.track_incomplete_refs()
                result = analyzer.visit_callable_type(defn.type, nested=False)
                # Don't store not ready types (including placeholders).
                if self.found_incomplete_ref(tag) or has_placeholder(result):
                    self.defer(defn)
                    return
                assert isinstance(result, ProperType)
                defn.type = result
                self.add_type_alias_deps(analyzer.aliases_used)
                self.check_function_signature(defn)
                if isinstance(defn, FuncDef):
                    assert isinstance(defn.type, CallableType)
                    defn.type = set_callable_name(defn.type, defn)
            self.analyze_arg_initializers(defn)
            self.analyze_function_body(defn)
        if (defn.is_coroutine and
                isinstance(defn.type, CallableType) and
                self.wrapped_coro_return_types.get(defn) != defn.type):
            if defn.is_async_generator:
                # Async generator return types are not wrapped here.
                pass
            else:
                # A coroutine `async def f(...) -> T` has external return type
                # Coroutine[Any, Any, T]; wrap it (once -- see the cache check above).
                any_type = AnyType(TypeOfAny.special_form)
                ret_type = self.named_type_or_none('typing.Coroutine',
                                                   [any_type, any_type, defn.type.ret_type])
                assert ret_type is not None, "Internal error: typing.Coroutine not found"
                defn.type = defn.type.copy_modified(ret_type=ret_type)
                self.wrapped_coro_return_types[defn] = defn.type
    def prepare_method_signature(self, func: FuncDef, info: TypeInfo) -> None:
        """Check basic signature validity and tweak annotation of self/cls argument."""
        # Only non-static methods are special.
        functype = func.type
        if not func.is_static:
            if func.name in ['__init_subclass__', '__class_getitem__']:
                # These are implicit class methods.
                func.is_class = True
            if not func.arguments:
                self.fail('Method must have at least one argument', func)
            elif isinstance(functype, CallableType):
                self_type = get_proper_type(functype.arg_types[0])
                if isinstance(self_type, AnyType):
                    # Implicit self/cls annotation: fill in the enclosing
                    # class's (or type object's) type.
                    leading_type: Type = fill_typevars(info)
                    if func.is_class or func.name == '__new__':
                        leading_type = self.class_type(leading_type)
                    func.type = replace_implicit_first_type(functype, leading_type)
    def set_original_def(self, previous: Optional[Node], new: Union[FuncDef, Decorator]) -> bool:
        """If 'new' conditionally redefines 'previous', record 'previous' as the original.

        Returns True if the redefinition is acceptable (conditional definitions
        and repeated unnamed functions), False otherwise.
        """
        if isinstance(new, Decorator):
            new = new.func
        if (
            isinstance(previous, (FuncDef, Decorator))
            and unnamed_function(new.name)
            and unnamed_function(previous.name)
        ):
            # Unnamed functions (e.g. lambdas sharing '<lambda>') may repeat freely.
            return True
        if isinstance(previous, (FuncDef, Var, Decorator)) and new.is_conditional:
            new.original_def = previous
            return True
        else:
            return False
def update_function_type_variables(self, fun_type: CallableType, defn: FuncItem) -> None:
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
a = self.type_analyzer()
fun_type.variables = a.bind_function_type_variables(fun_type, defn)
    def visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
        """Semantic analysis entry point for an overloaded function definition."""
        self.statement = defn
        self.add_function_to_symbol_table(defn)
        if not self.recurse_into_functions:
            return
        with self.scope.function_scope(defn):
            self.analyze_overloaded_func_def(defn)
    def analyze_overloaded_func_def(self, defn: OverloadedFuncDef) -> None:
        """Analyze an @overload chain or a multi-part property definition."""
        defn._fullname = self.qualified_name(defn.name)
        defn.items = defn.unanalyzed_items.copy()
        first_item = defn.items[0]
        first_item.is_overload = True
        first_item.accept(self)
        if isinstance(first_item, Decorator) and first_item.func.is_property:
            # This is a property defined with multiple methods (@x.setter etc.).
            first_item.func.is_overload = True
            self.analyze_property_with_multi_part_definition(defn)
            typ = function_type(first_item.func, self.builtin_type('builtins.function'))
            assert isinstance(typ, CallableType)
            types = [typ]
        else:
            # A regular overload: collect item signatures, the implementation
            # (if outside a stub), and any items missing @overload.
            types, impl, non_overload_indexes = self.analyze_overload_sigs_and_impl(defn)
            defn.impl = impl
            if non_overload_indexes:
                self.handle_missing_overload_decorators(defn, non_overload_indexes,
                                                        some_overload_decorators=len(types) > 0)
            # The implementation isn't an overload item; remove it from the list.
            if impl is not None:
                assert impl is defn.items[-1]
                defn.items = defn.items[:-1]
            elif not non_overload_indexes:
                self.handle_missing_overload_implementation(defn)
        if types:
            defn.type = Overloaded(types)
            defn.type.line = defn.line
        if not defn.items:
            # It was not a real overload after all, but function redefinition. We've
            # already reported errors for the redefined items above.
            if not defn.impl:
                # For broken "overloads" with no items and no implementation, assign
                # some impl so later checks can produce better error messages.
                defn.impl = defn.unanalyzed_items[-1]
            return
        # This is a real overload: infer properties and perform some checks.
        self.process_final_in_overload(defn)
        self.process_static_or_class_method_in_overload(defn)
    def analyze_overload_sigs_and_impl(
            self,
            defn: OverloadedFuncDef) -> Tuple[List[CallableType],
                                              Optional[OverloadPart],
                                              List[int]]:
        """Find overload signatures, the implementation, and items with errors."""
        types = []
        non_overload_indexes = []
        impl: Optional[OverloadPart] = None
        for i, item in enumerate(defn.items):
            if i != 0:
                # The first item was already visited by the caller.
                item.is_overload = True
                item.accept(self)
            if isinstance(item, Decorator):
                callable = function_type(item.func, self.builtin_type('builtins.function'))
                assert isinstance(callable, CallableType)
                if not any(refers_to_fullname(dec, 'typing.overload')
                           for dec in item.decorators):
                    if i == len(defn.items) - 1 and not self.is_stub_file:
                        # Last item outside a stub is the implementation.
                        impl = item
                    else:
                        # Oops it wasn't an overload after all. A clear error
                        # will vary based on where in the list it is, record
                        # that.
                        non_overload_indexes.append(i)
                else:
                    item.func.is_overload = True
                    types.append(callable)
            elif isinstance(item, FuncDef):
                if i == len(defn.items) - 1 and not self.is_stub_file:
                    impl = item
                else:
                    non_overload_indexes.append(i)
        return types, impl, non_overload_indexes
    def handle_missing_overload_decorators(self,
                                           defn: OverloadedFuncDef,
                                           non_overload_indexes: List[int],
                                           some_overload_decorators: bool) -> None:
        """Generate errors for overload items missing @overload.

        Side effect: removes the offending items from defn.items.
        """
        if some_overload_decorators:
            # Some of them were overloads, but not all.
            for idx in non_overload_indexes:
                if self.is_stub_file:
                    self.fail("An implementation for an overloaded function "
                              "is not allowed in a stub file", defn.items[idx])
                else:
                    self.fail("The implementation for an overloaded function "
                              "must come last", defn.items[idx])
        else:
            # None of the items had @overload: treat them as redefinitions.
            for idx in non_overload_indexes[1:]:
                self.name_already_defined(defn.name, defn.items[idx], defn.items[0])
            if defn.impl:
                self.name_already_defined(defn.name, defn.impl, defn.items[0])
        # Remove the non-overloads
        for idx in reversed(non_overload_indexes):
            del defn.items[idx]
    def handle_missing_overload_implementation(self, defn: OverloadedFuncDef) -> None:
        """Generate an error for a missing overload implementation (stubs don't need one)."""
        if not self.is_stub_file:
            if self.type and self.type.is_protocol and not self.is_func_scope():
                # An overloaded protocol method doesn't need an implementation.
                for item in defn.items:
                    if isinstance(item, Decorator):
                        item.func.is_abstract = True
                    else:
                        item.is_abstract = True
            else:
                self.fail(
                    "An overloaded function outside a stub file must have an implementation",
                    defn)
    def process_final_in_overload(self, defn: OverloadedFuncDef) -> None:
        """Detect the @final status of an overloaded function (and perform checks)."""
        if any(item.is_final for item in defn.items):
            defn.is_final = True
            # Only report a single error per overload chain.
            bad_final = next(ov for ov in defn.items if ov.is_final)
            if not self.is_stub_file:
                self.fail("@final should be applied only to overload implementation",
                          bad_final)
            elif any(item.is_final for item in defn.items[1:]):
                bad_final = next(ov for ov in defn.items[1:] if ov.is_final)
                self.fail("In a stub file @final must be applied only to the first overload",
                          bad_final)
        if defn.impl is not None and defn.impl.is_final:
            defn.is_final = True
    def process_static_or_class_method_in_overload(self, defn: OverloadedFuncDef) -> None:
        """Check that @staticmethod/@classmethod is applied consistently across items."""
        class_status = []
        static_status = []
        for item in defn.items:
            if isinstance(item, Decorator):
                inner = item.func
            elif isinstance(item, FuncDef):
                inner = item
            else:
                assert False, "The 'item' variable is an unexpected type: {}".format(type(item))
            class_status.append(inner.is_class)
            static_status.append(inner.is_static)
        # The implementation (if any) must be consistent with the items too.
        if defn.impl is not None:
            if isinstance(defn.impl, Decorator):
                inner = defn.impl.func
            elif isinstance(defn.impl, FuncDef):
                inner = defn.impl
            else:
                assert False, "Unexpected impl type: {}".format(type(defn.impl))
            class_status.append(inner.is_class)
            static_status.append(inner.is_static)
        if len(set(class_status)) != 1:
            self.msg.overload_inconsistently_applies_decorator('classmethod', defn)
        elif len(set(static_status)) != 1:
            self.msg.overload_inconsistently_applies_decorator('staticmethod', defn)
        else:
            defn.is_class = class_status[0]
            defn.is_static = static_status[0]
    def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) -> None:
        """Analyze a property defined using multiple methods (e.g., using @x.setter).

        Assume that the first method (@property) has already been analyzed.
        """
        defn.is_property = True
        items = defn.items
        first_item = cast(Decorator, defn.items[0])
        deleted_items = []
        for i, item in enumerate(items[1:]):
            if isinstance(item, Decorator):
                if len(item.decorators) == 1:
                    node = item.decorators[0]
                    if isinstance(node, MemberExpr):
                        if node.name == 'setter':
                            # The first item represents the entire property.
                            first_item.var.is_settable_property = True
                            # Get abstractness from the original definition.
                            item.func.is_abstract = first_item.func.is_abstract
                else:
                    self.fail("Decorated property not supported", item)
                item.func.accept(self)
            else:
                self.fail('Unexpected definition for property "{}"'.format(first_item.func.name),
                          item)
                deleted_items.append(i + 1)
        # Remove invalid items after the loop (can't mutate while iterating).
        for i in reversed(deleted_items):
            del items[i]
def add_function_to_symbol_table(self, func: Union[FuncDef, OverloadedFuncDef]) -> None:
if self.is_class_scope():
assert self.type is not None
func.info = self.type
func._fullname = self.qualified_name(func.name)
self.add_symbol(func.name, func, func)
def analyze_arg_initializers(self, defn: FuncItem) -> None:
with self.tvar_scope_frame(self.tvar_scope.method_frame()):
for arg in defn.arguments:
if arg.initializer:
arg.initializer.accept(self)
    def analyze_function_body(self, defn: FuncItem) -> None:
        """Bind the function's type variables, set up its local scope and analyze the body."""
        is_method = self.is_class_scope()
        with self.tvar_scope_frame(self.tvar_scope.method_frame()):
            if defn.type:
                a = self.type_analyzer()
                # Bind the type variables again to visit the body.
                a.bind_function_type_variables(cast(CallableType, defn.type), defn)
            self.function_stack.append(defn)
            self.enter(defn)
            for arg in defn.arguments:
                self.add_local(arg.variable, defn)
            # The first argument of a non-static, non-class method is like 'self'
            # (though the name could be different), having the enclosing class's
            # instance type.
            if is_method and not defn.is_static and not defn.is_class and defn.arguments:
                defn.arguments[0].variable.is_self = True
            defn.body.accept(self)
            self.leave()
            self.function_stack.pop()
def check_classvar_in_signature(self, typ: ProperType) -> None:
if isinstance(typ, Overloaded):
for t in typ.items(): # type: ProperType
self.check_classvar_in_signature(t)
return
if not isinstance(typ, CallableType):
return
for t in get_proper_types(typ.arg_types) + [get_proper_type(typ.ret_type)]:
if self.is_classvar(t):
self.fail_invalid_classvar(t)
# Show only one error per signature
break
def check_function_signature(self, fdef: FuncItem) -> None:
sig = fdef.type
assert isinstance(sig, CallableType)
if len(sig.arg_types) < len(fdef.arguments):
self.fail('Type signature has too few arguments', fdef)
# Add dummy Any arguments to prevent crashes later.
num_extra_anys = len(fdef.arguments) - len(sig.arg_types)
extra_anys = [AnyType(TypeOfAny.from_error)] * num_extra_anys
sig.arg_types.extend(extra_anys)
elif len(sig.arg_types) > len(fdef.arguments):
self.fail('Type signature has too many arguments', fdef, blocker=True)
    def visit_decorator(self, dec: Decorator) -> None:
        """Analyze a decorated function; recognize and strip special decorators."""
        self.statement = dec
        # TODO: better don't modify them at all.
        dec.decorators = dec.original_decorators.copy()
        dec.func.is_conditional = self.block_depth[-1] > 0
        if not dec.is_overload:
            self.add_symbol(dec.name, dec, dec)
        dec.func._fullname = self.qualified_name(dec.name)
        for d in dec.decorators:
            d.accept(self)
        # Decorators with a special meaning are stripped from the decorator list
        # ('removed' collects their indexes) after setting the corresponding flags.
        removed: List[int] = []
        no_type_check = False
        for i, d in enumerate(dec.decorators):
            if refers_to_fullname(d, 'abc.abstractmethod'):
                removed.append(i)
                dec.func.is_abstract = True
                self.check_decorated_function_is_method('abstractmethod', dec)
            elif (refers_to_fullname(d, 'asyncio.coroutines.coroutine') or
                  refers_to_fullname(d, 'types.coroutine')):
                removed.append(i)
                dec.func.is_awaitable_coroutine = True
            elif refers_to_fullname(d, 'builtins.staticmethod'):
                removed.append(i)
                dec.func.is_static = True
                dec.var.is_staticmethod = True
                self.check_decorated_function_is_method('staticmethod', dec)
            elif refers_to_fullname(d, 'builtins.classmethod'):
                removed.append(i)
                dec.func.is_class = True
                dec.var.is_classmethod = True
                self.check_decorated_function_is_method('classmethod', dec)
            elif (refers_to_fullname(d, 'builtins.property') or
                  refers_to_fullname(d, 'abc.abstractproperty') or
                  refers_to_fullname(d, 'functools.cached_property')):
                removed.append(i)
                dec.func.is_property = True
                dec.var.is_property = True
                if refers_to_fullname(d, 'abc.abstractproperty'):
                    dec.func.is_abstract = True
                elif refers_to_fullname(d, 'functools.cached_property'):
                    dec.var.is_settable_property = True
                self.check_decorated_function_is_method('property', dec)
                if len(dec.func.arguments) > 1:
                    self.fail('Too many arguments', dec.func)
            elif refers_to_fullname(d, 'typing.no_type_check'):
                dec.var.type = AnyType(TypeOfAny.special_form)
                no_type_check = True
            elif (refers_to_fullname(d, 'typing.final') or
                  refers_to_fullname(d, 'typing_extensions.final')):
                if self.is_class_scope():
                    assert self.type is not None, "No type set at class scope"
                    if self.type.is_protocol:
                        self.msg.protocol_members_cant_be_final(d)
                    else:
                        dec.func.is_final = True
                        dec.var.is_final = True
                    removed.append(i)
                else:
                    self.fail("@final cannot be used with non-method functions", d)
        for i in reversed(removed):
            del dec.decorators[i]
        if (not dec.is_overload or dec.var.is_property) and self.type:
            dec.var.info = self.type
            dec.var.is_initialized_in_class = True
        if not no_type_check and self.recurse_into_functions:
            # @no_type_check suppresses analysis of the function body.
            dec.func.accept(self)
        if dec.decorators and dec.var.is_property:
            self.fail('Decorated property not supported', dec)
def check_decorated_function_is_method(self, decorator: str,
context: Context) -> None:
if not self.type or self.is_func_scope():
self.fail('"%s" used with a non-method' % decorator, context)
def visit_class_def(self, defn: ClassDef) -> None:
self.statement = defn
self.incomplete_type_stack.append(not defn.info)
with self.tvar_scope_frame(self.tvar_scope.class_frame()):
self.analyze_class(defn)
self.incomplete_type_stack.pop()
    def analyze_class(self, defn: ClassDef) -> None:
        """Analyze a class definition (main entry point for class semantic analysis)."""
        fullname = self.qualified_name(defn.name)
        if not defn.info and not self.is_core_builtin_class(defn):
            # Add a placeholder so that self-references in base classes can be
            # resolved. We don't want this to cause a deferral, since if there
            # are no incomplete references, we'll replace this with a TypeInfo
            # before returning.
            placeholder = PlaceholderNode(fullname, defn, defn.line, becomes_typeinfo=True)
            self.add_symbol(defn.name, placeholder, defn, can_defer=False)
        tag = self.track_incomplete_refs()
        # Restore base classes after previous iteration (things like Generic or Protocol).
        defn.base_type_exprs.extend(defn.removed_base_type_exprs)
        defn.removed_base_type_exprs.clear()
        self.update_metaclass(defn)
        bases = defn.base_type_exprs
        bases, tvar_defs, is_protocol = self.clean_up_bases_and_infer_type_variables(defn, bases,
                                                                                     context=defn)
        for tvd in tvar_defs:
            if any(has_placeholder(t) for t in [tvd.upper_bound] + tvd.values):
                # Some type variable bounds or values are not ready, we need
                # to re-analyze this class.
                self.defer()
        self.analyze_class_keywords(defn)
        result = self.analyze_base_classes(bases)
        if result is None or self.found_incomplete_ref(tag):
            # Something was incomplete. Defer the current target.
            self.mark_incomplete(defn.name, defn)
            return
        base_types, base_error = result
        if any(isinstance(base, PlaceholderType) for base, _ in base_types):
            # We need to know the TypeInfo of each base to construct the MRO. Placeholder
            # types are okay in nested positions, since they can't affect the MRO.
            self.mark_incomplete(defn.name, defn)
            return
        is_typeddict, info = self.typed_dict_analyzer.analyze_typeddict_classdef(defn)
        if is_typeddict:
            for decorator in defn.decorators:
                decorator.accept(self)
                if isinstance(decorator, RefExpr):
                    if decorator.fullname in ('typing.final',
                                              'typing_extensions.final'):
                        self.fail("@final cannot be used with TypedDict", decorator)
            if info is None:
                self.mark_incomplete(defn.name, defn)
            else:
                self.prepare_class_def(defn, info)
            return
        if self.analyze_namedtuple_classdef(defn):
            return
        # Create TypeInfo for class now that base classes and the MRO can be calculated.
        self.prepare_class_def(defn)
        defn.type_vars = tvar_defs
        defn.info.type_vars = [tvar.name for tvar in tvar_defs]
        if base_error:
            defn.info.fallback_to_any = True
        with self.scope.class_scope(defn.info):
            self.configure_base_classes(defn, base_types)
            defn.info.is_protocol = is_protocol
            self.analyze_metaclass(defn)
            defn.info.runtime_protocol = False
            for decorator in defn.decorators:
                self.analyze_class_decorator(defn, decorator)
            self.analyze_class_body_common(defn)
def is_core_builtin_class(self, defn: ClassDef) -> bool:
return self.cur_mod_id == 'builtins' and defn.name in CORE_BUILTIN_CLASSES
    def analyze_class_body_common(self, defn: ClassDef) -> None:
        """Parts of class body analysis that are common to all kinds of class defs."""
        self.enter_class(defn.info)
        defn.defs.accept(self)
        self.apply_class_plugin_hooks(defn)
        self.leave_class()
    def analyze_namedtuple_classdef(self, defn: ClassDef) -> bool:
        """Check if this class can define a named tuple; analyze it if so.

        Return True if the class defines a named tuple (even when the
        definition is still incomplete on this iteration).
        """
        if defn.info and defn.info.is_named_tuple:
            # Don't reprocess everything. We just need to process methods defined
            # in the named tuple class body.
            is_named_tuple, info = True, defn.info
        else:
            is_named_tuple, info = self.named_tuple_analyzer.analyze_namedtuple_classdef(
                defn, self.is_stub_file)
        if is_named_tuple:
            if info is None:
                # Definition was incomplete; defer this target.
                self.mark_incomplete(defn.name, defn)
            else:
                self.prepare_class_def(defn, info)
                with self.scope.class_scope(defn.info):
                    with self.named_tuple_analyzer.save_namedtuple_body(info):
                        self.analyze_class_body_common(defn)
            return True
        return False
    def apply_class_plugin_hooks(self, defn: ClassDef) -> None:
        """Apply plugin hooks that may modify or extend the class definition."""
        def get_fullname(expr: Expression) -> Optional[str]:
            # Resolve the fully qualified name of a decorator/metaclass/base expression.
            if isinstance(expr, CallExpr):
                return get_fullname(expr.callee)
            elif isinstance(expr, IndexExpr):
                return get_fullname(expr.base)
            elif isinstance(expr, RefExpr):
                if expr.fullname:
                    return expr.fullname
                # If we don't have a fullname look it up. This happens because base classes are
                # analyzed in a different manner (see exprtotype.py) and therefore those AST
                # nodes will not have full names.
                sym = self.lookup_type_node(expr)
                if sym:
                    return sym.fullname
            return None
        for decorator in defn.decorators:
            decorator_name = get_fullname(decorator)
            if decorator_name:
                hook = self.plugin.get_class_decorator_hook(decorator_name)
                if hook:
                    hook(ClassDefContext(defn, decorator, self))
        if defn.metaclass:
            metaclass_name = get_fullname(defn.metaclass)
            if metaclass_name:
                hook = self.plugin.get_metaclass_hook(metaclass_name)
                if hook:
                    hook(ClassDefContext(defn, defn.metaclass, self))
        for base_expr in defn.base_type_exprs:
            base_name = get_fullname(base_expr)
            if base_name:
                hook = self.plugin.get_base_class_hook(base_name)
                if hook:
                    hook(ClassDefContext(defn, base_expr, self))
def analyze_class_keywords(self, defn: ClassDef) -> None:
for value in defn.keywords.values():
value.accept(self)
    def enter_class(self, info: TypeInfo) -> None:
        """Push a new class scope (exact mirror of leave_class())."""
        # Remember previous active class
        self.type_stack.append(self.type)
        self.locals.append(None)  # Add class scope
        self.is_comprehension_stack.append(False)
        self.block_depth.append(-1)  # The class body increments this to 0
        self.type = info
        self.missing_names.append(set())
    def leave_class(self) -> None:
        """Restore analysis state to the previously active class/module scope.

        Each pop below undoes the corresponding push in enter_class().
        """
        self.block_depth.pop()
        self.locals.pop()
        self.is_comprehension_stack.pop()
        self.type = self.type_stack.pop()
        self.missing_names.pop()
def analyze_class_decorator(self, defn: ClassDef, decorator: Expression) -> None:
decorator.accept(self)
if isinstance(decorator, RefExpr):
if decorator.fullname in RUNTIME_PROTOCOL_DECOS:
if defn.info.is_protocol:
defn.info.runtime_protocol = True
else:
self.fail('@runtime_checkable can only be used with protocol classes',
defn)
elif decorator.fullname in ('typing.final',
'typing_extensions.final'):
defn.info.is_final = True
    def clean_up_bases_and_infer_type_variables(
            self,
            defn: ClassDef,
            base_type_exprs: List[Expression],
            context: Context) -> Tuple[List[Expression],
                                       List[TypeVarDef],
                                       bool]:
        """Remove extra base classes such as Generic and infer type vars.

        For example, consider this class:

          class Foo(Bar, Generic[T]): ...

        Now we will remove Generic[T] from bases of Foo and infer that the
        type variable 'T' is a type argument of Foo.

        Note that this is performed *before* semantic analysis.

        Returns (remaining base expressions, inferred type variables,
        is protocol).
        """
        removed: List[int] = []
        declared_tvars: TypeVarLikeList = []
        is_protocol = False
        for i, base_expr in enumerate(base_type_exprs):
            self.analyze_type_expr(base_expr)
            try:
                base = self.expr_to_unanalyzed_type(base_expr)
            except TypeTranslationError:
                # This error will be caught later.
                continue
            result = self.analyze_class_typevar_declaration(base)
            if result is not None:
                if declared_tvars:
                    self.fail('Only single Generic[...] or Protocol[...] can be in bases', context)
                removed.append(i)
                tvars = result[0]
                is_protocol |= result[1]
                declared_tvars.extend(tvars)
            if isinstance(base, UnboundType):
                sym = self.lookup_qualified(base.name, base)
                if sym is not None and sym.node is not None:
                    if (sym.node.fullname in ('typing.Protocol', 'typing_extensions.Protocol') and
                            i not in removed):
                        # also remove bare 'Protocol' bases
                        removed.append(i)
                        is_protocol = True
        all_tvars = self.get_all_bases_tvars(base_type_exprs, removed)
        if declared_tvars:
            if len(remove_dups(declared_tvars)) < len(declared_tvars):
                self.fail("Duplicate type variables in Generic[...] or Protocol[...]", context)
            declared_tvars = remove_dups(declared_tvars)
            if not set(all_tvars).issubset(set(declared_tvars)):
                self.fail("If Generic[...] or Protocol[...] is present"
                          " it should list all type variables", context)
                # In case of error, Generic tvars will go first
                declared_tvars = remove_dups(declared_tvars + all_tvars)
        else:
            declared_tvars = all_tvars
        for i in reversed(removed):
            # We need to actually remove the base class expressions like Generic[T],
            # mostly because otherwise they will create spurious dependencies in fine
            # grained incremental mode.
            defn.removed_base_type_exprs.append(defn.base_type_exprs[i])
            del base_type_exprs[i]
        tvar_defs: List[TypeVarDef] = []
        for name, tvar_expr in declared_tvars:
            tvar_def = self.tvar_scope.bind_new(name, tvar_expr)
            assert isinstance(tvar_def, TypeVarDef), (
                "mypy does not currently support ParamSpec use in generic classes"
            )
            tvar_defs.append(tvar_def)
        return base_type_exprs, tvar_defs, is_protocol
    def analyze_class_typevar_declaration(
            self,
            base: Type
    ) -> Optional[Tuple[TypeVarLikeList, bool]]:
        """Analyze detail of a class typevar declaration.

        Return None if the base class does not declare type variables.
        Otherwise, return the declared type variables and a flag telling
        whether the base was Protocol[...].
        """
        if not isinstance(base, UnboundType):
            return None
        unbound = base
        sym = self.lookup_qualified(unbound.name, unbound)
        if sym is None or sym.node is None:
            return None
        if (sym.node.fullname == 'typing.Generic' or
                sym.node.fullname == 'typing.Protocol' and base.args or
                sym.node.fullname == 'typing_extensions.Protocol' and base.args):
            is_proto = sym.node.fullname != 'typing.Generic'
            tvars: TypeVarLikeList = []
            for arg in unbound.args:
                tag = self.track_incomplete_refs()
                tvar = self.analyze_unbound_tvar(arg)
                if tvar:
                    tvars.append(tvar)
                elif not self.found_incomplete_ref(tag):
                    self.fail('Free type variable expected in %s[...]' %
                              sym.node.name, base)
            return tvars, is_proto
        return None
def analyze_unbound_tvar(self, t: Type) -> Optional[Tuple[str, TypeVarExpr]]:
if not isinstance(t, UnboundType):
return None
unbound = t
sym = self.lookup_qualified(unbound.name, unbound)
if sym and isinstance(sym.node, PlaceholderNode):
self.record_incomplete_ref()
if sym is None or not isinstance(sym.node, TypeVarExpr):
return None
elif sym.fullname and not self.tvar_scope.allow_binding(sym.fullname):
# It's bound by our type variable scope
return None
else:
assert isinstance(sym.node, TypeVarExpr)
return unbound.name, sym.node
def get_all_bases_tvars(self,
base_type_exprs: List[Expression],
removed: List[int]) -> TypeVarLikeList:
tvars: TypeVarLikeList = []
for i, base_expr in enumerate(base_type_exprs):
if i not in removed:
try:
base = self.expr_to_unanalyzed_type(base_expr)
except TypeTranslationError:
continue
base_tvars = base.accept(TypeVarLikeQuery(self.lookup_qualified, self.tvar_scope))
tvars.extend(base_tvars)
return remove_dups(tvars)
    def prepare_class_def(self, defn: ClassDef, info: Optional[TypeInfo] = None) -> None:
        """Prepare for the analysis of a class definition.

        Create an empty TypeInfo and store it in a symbol table, or if the
        'info' argument is provided, store it instead (used for magically
        defined classes such as named tuples and TypedDicts).
        """
        if not defn.info:
            defn.fullname = self.qualified_name(defn.name)
            # TypeInfo was created during the first pass, or now.
            info = info or self.make_empty_type_info(defn)
            defn.info = info
            info.defn = defn
            if not self.is_func_scope():
                info._fullname = self.qualified_name(defn.name)
            else:
                info._fullname = info.name
        self.add_symbol(defn.name, defn.info, defn)
        if self.is_nested_within_func_scope():
            # We need to preserve local classes, let's store them
            # in globals under mangled unique names
            #
            # TODO: Putting local classes into globals breaks assumptions in fine-grained
            # incremental mode and we should avoid it. In general, this logic is too
            # ad-hoc and needs to be removed/refactored.
            if '@' not in defn.info._fullname:
                local_name = defn.info.name + '@' + str(defn.line)
                if defn.info.is_named_tuple:
                    # Module is already correctly set in _fullname for named tuples.
                    defn.info._fullname += '@' + str(defn.line)
                else:
                    defn.info._fullname = self.cur_mod_id + '.' + local_name
            else:
                # Preserve name from previous fine-grained incremental run.
                local_name = defn.info.name
            defn.fullname = defn.info._fullname
            self.globals[local_name] = SymbolTableNode(GDEF, defn.info)
def make_empty_type_info(self, defn: ClassDef) -> TypeInfo:
if (self.is_module_scope()
and self.cur_mod_id == 'builtins'
and defn.name in CORE_BUILTIN_CLASSES):
# Special case core built-in classes. A TypeInfo was already
# created for it before semantic analysis, but with a dummy
# ClassDef. Patch the real ClassDef object.
info = self.globals[defn.name].node
assert isinstance(info, TypeInfo)
else:
info = TypeInfo(SymbolTable(), defn, self.cur_mod_id)
info.set_line(defn)
return info
def get_name_repr_of_expr(self, expr: Expression) -> Optional[str]:
if isinstance(expr, NameExpr):
return expr.name
if isinstance(expr, MemberExpr):
return get_member_expr_fullname(expr)
if isinstance(expr, IndexExpr):
return self.get_name_repr_of_expr(expr.base)
if isinstance(expr, CallExpr):
return self.get_name_repr_of_expr(expr.callee)
return None
    def analyze_base_classes(
            self,
            base_type_exprs: List[Expression]) -> Optional[Tuple[List[Tuple[ProperType,
                                                                            Expression]],
                                                                 bool]]:
        """Analyze base class types.

        Return None if some definition was incomplete. Otherwise, return a tuple
        with these items:

         * List of (analyzed type, original expression) tuples
         * Boolean indicating whether one of the bases had a semantic analysis
           error
        """
        is_error = False
        bases = []
        for base_expr in base_type_exprs:
            if (isinstance(base_expr, RefExpr) and
                    base_expr.fullname in ('typing.NamedTuple',) + TPDICT_NAMES):
                # Ignore magic bases for now.
                continue
            try:
                base = self.expr_to_analyzed_type(base_expr, allow_placeholder=True)
            except TypeTranslationError:
                name = self.get_name_repr_of_expr(base_expr)
                if isinstance(base_expr, CallExpr):
                    msg = 'Unsupported dynamic base class'
                else:
                    msg = 'Invalid base class'
                if name:
                    msg += ' "{}"'.format(name)
                self.fail(msg, base_expr)
                is_error = True
                continue
            if base is None:
                return None
            base = get_proper_type(base)
            bases.append((base, base_expr))
        return bases, is_error
    def configure_base_classes(self,
                               defn: ClassDef,
                               bases: List[Tuple[ProperType, Expression]]) -> None:
        """Set up base classes.

        This computes several attributes on the corresponding TypeInfo defn.info
        related to the base classes: defn.info.bases, defn.info.mro, and
        miscellaneous others (at least tuple_type and fallback_to_any).
        """
        base_types: List[Instance] = []
        info = defn.info
        info.tuple_type = None
        for base, base_expr in bases:
            if isinstance(base, TupleType):
                actual_base = self.configure_tuple_base_class(defn, base, base_expr)
                base_types.append(actual_base)
            elif isinstance(base, Instance):
                if base.type.is_newtype:
                    self.fail('Cannot subclass "NewType"', defn)
                base_types.append(base)
            elif isinstance(base, AnyType):
                if self.options.disallow_subclassing_any:
                    if isinstance(base_expr, (NameExpr, MemberExpr)):
                        msg = 'Class cannot subclass "{}" (has type "Any")'.format(base_expr.name)
                    else:
                        msg = 'Class cannot subclass value of type "Any"'
                    self.fail(msg, base_expr)
                info.fallback_to_any = True
            else:
                msg = 'Invalid base class'
                name = self.get_name_repr_of_expr(base_expr)
                if name:
                    msg += ' "{}"'.format(name)
                self.fail(msg, base_expr)
                info.fallback_to_any = True
            if self.options.disallow_any_unimported and has_any_from_unimported_type(base):
                if isinstance(base_expr, (NameExpr, MemberExpr)):
                    prefix = "Base type {}".format(base_expr.name)
                else:
                    prefix = "Base type"
                self.msg.unimported_type_becomes_any(prefix, base, base_expr)
            check_for_explicit_any(base, self.options, self.is_typeshed_stub_file, self.msg,
                                   context=base_expr)
        # Add 'object' as implicit base if there is no other base class.
        if not base_types and defn.fullname != 'builtins.object':
            base_types.append(self.object_type())
        info.bases = base_types
        # Calculate the MRO.
        if not self.verify_base_classes(defn):
            self.set_dummy_mro(defn.info)
            return
        self.calculate_class_mro(defn, self.object_type)
    def configure_tuple_base_class(self,
                                   defn: ClassDef,
                                   base: TupleType,
                                   base_expr: Expression) -> Instance:
        """Record a tuple-like (e.g. NamedTuple) base and return its fallback Instance."""
        info = defn.info
        # There may be an existing valid tuple type from previous semanal iterations.
        # Use equality to check if it is the case.
        if info.tuple_type and info.tuple_type != base:
            self.fail("Class has two incompatible bases derived from tuple", defn)
            defn.has_incompatible_baseclass = True
        info.tuple_type = base
        if isinstance(base_expr, CallExpr):
            defn.analyzed = NamedTupleExpr(base.partial_fallback.type)
            defn.analyzed.line = defn.line
            defn.analyzed.column = defn.column
        if base.partial_fallback.type.fullname == 'builtins.tuple':
            # Fallback can only be safely calculated after semantic analysis, since base
            # classes may be incomplete. Postpone the calculation.
            self.schedule_patch(PRIORITY_FALLBACKS, lambda: calculate_tuple_fallback(base))
        return base.partial_fallback
def set_dummy_mro(self, info: TypeInfo) -> None:
# Give it an MRO consisting of just the class itself and object.
info.mro = [info, self.object_type().type]
info.bad_mro = True
    def calculate_class_mro(self, defn: ClassDef,
                            obj_type: Optional[Callable[[], Instance]] = None) -> None:
        """Calculate method resolution order for a class.

        `obj_type` exists to fill in the 'object' base when base classes may
        still be incomplete (e.g. in an import cycle).
        """
        try:
            calculate_mro(defn.info, obj_type)
        except MroError:
            self.fail('Cannot determine consistent method resolution '
                      'order (MRO) for "%s"' % defn.name, defn)
            self.set_dummy_mro(defn.info)
        # Allow plugins to alter the MRO to handle the fact that `def mro()`
        # on metaclasses permits MRO rewriting.
        if defn.fullname:
            hook = self.plugin.get_customize_class_mro_hook(defn.fullname)
            if hook:
                hook(ClassDefContext(defn, FakeExpression(), self))
    def update_metaclass(self, defn: ClassDef) -> None:
        """Look for special metaclass declarations and update defn.metaclass.

        Handles (and rejects conflicting combinations of):
          * __metaclass__ attribute in Python 2
          * six.with_metaclass(M, B1, B2, ...)
          * @six.add_metaclass(M)
        """
        # Look for "__metaclass__ = <metaclass>" in Python 2
        python2_meta_expr: Optional[Expression] = None
        if self.options.python_version[0] == 2:
            for body_node in defn.defs.body:
                if isinstance(body_node, ClassDef) and body_node.name == "__metaclass__":
                    self.fail("Metaclasses defined as inner classes are not supported", body_node)
                    break
                elif isinstance(body_node, AssignmentStmt) and len(body_node.lvalues) == 1:
                    lvalue = body_node.lvalues[0]
                    if isinstance(lvalue, NameExpr) and lvalue.name == "__metaclass__":
                        python2_meta_expr = body_node.rvalue
        # Look for six.with_metaclass(M, B1, B2, ...)
        with_meta_expr: Optional[Expression] = None
        if len(defn.base_type_exprs) == 1:
            base_expr = defn.base_type_exprs[0]
            if isinstance(base_expr, CallExpr) and isinstance(base_expr.callee, RefExpr):
                base_expr.accept(self)
                if (base_expr.callee.fullname in {'six.with_metaclass',
                                                  'future.utils.with_metaclass',
                                                  'past.utils.with_metaclass'}
                        and len(base_expr.args) >= 1
                        and all(kind == ARG_POS for kind in base_expr.arg_kinds)):
                    with_meta_expr = base_expr.args[0]
                    defn.base_type_exprs = base_expr.args[1:]
        # Look for @six.add_metaclass(M)
        add_meta_expr: Optional[Expression] = None
        for dec_expr in defn.decorators:
            if isinstance(dec_expr, CallExpr) and isinstance(dec_expr.callee, RefExpr):
                dec_expr.callee.accept(self)
                if (dec_expr.callee.fullname == 'six.add_metaclass'
                        and len(dec_expr.args) == 1
                        and dec_expr.arg_kinds[0] == ARG_POS):
                    add_meta_expr = dec_expr.args[0]
                    break
        # At most one metaclass declaration may be in effect.
        metas = {defn.metaclass, python2_meta_expr, with_meta_expr, add_meta_expr} - {None}
        if len(metas) == 0:
            return
        if len(metas) > 1:
            self.fail("Multiple metaclass definitions", defn)
            return
        defn.metaclass = metas.pop()
def verify_base_classes(self, defn: ClassDef) -> bool:
info = defn.info
cycle = False
for base in info.bases:
baseinfo = base.type
if self.is_base_class(info, baseinfo):
self.fail('Cycle in inheritance hierarchy', defn)
cycle = True
if baseinfo.fullname == 'builtins.bool':
self.fail('"%s" is not a valid base class' %
baseinfo.name, defn, blocker=True)
return False
dup = find_duplicate(info.direct_base_classes())
if dup:
self.fail('Duplicate base class "%s"' % dup.name, defn, blocker=True)
return False
return not cycle
def is_base_class(self, t: TypeInfo, s: TypeInfo) -> bool:
# Search the base class graph for t, starting from s.
worklist = [s]
visited = {s}
while worklist:
nxt = worklist.pop()
if nxt == t:
return True
for base in nxt.bases:
if base.type not in visited:
worklist.append(base.type)
visited.add(base.type)
return False
    def analyze_metaclass(self, defn: ClassDef) -> None:
        """Resolve the declared metaclass (if any) and compute defn.info.metaclass_type."""
        if defn.metaclass:
            metaclass_name = None
            if isinstance(defn.metaclass, NameExpr):
                metaclass_name = defn.metaclass.name
            elif isinstance(defn.metaclass, MemberExpr):
                metaclass_name = get_member_expr_fullname(defn.metaclass)
            if metaclass_name is None:
                self.fail('Dynamic metaclass not supported for "%s"' % defn.name, defn.metaclass)
                return
            sym = self.lookup_qualified(metaclass_name, defn.metaclass)
            if sym is None:
                # Probably a name error - it is already handled elsewhere
                return
            if isinstance(sym.node, Var) and isinstance(get_proper_type(sym.node.type), AnyType):
                # 'Any' metaclass -- just ignore it.
                #
                # TODO: A better approach would be to record this information
                # and assume that the type object supports arbitrary
                # attributes, similar to an 'Any' base class.
                return
            if isinstance(sym.node, PlaceholderNode):
                # The metaclass is not ready yet; defer the current target.
                self.defer(defn)
                return
            if not isinstance(sym.node, TypeInfo) or sym.node.tuple_type is not None:
                self.fail('Invalid metaclass "%s"' % metaclass_name, defn.metaclass)
                return
            if not sym.node.is_metaclass():
                self.fail('Metaclasses not inheriting from "type" are not supported',
                          defn.metaclass)
                return
            inst = fill_typevars(sym.node)
            assert isinstance(inst, Instance)
            defn.info.declared_metaclass = inst
        defn.info.metaclass_type = defn.info.calculate_metaclass_type()
        if any(info.is_protocol for info in defn.info.mro):
            if (not defn.info.metaclass_type or
                    defn.info.metaclass_type.type.fullname == 'builtins.type'):
                # All protocols and their subclasses have ABCMeta metaclass by default.
                # TODO: add a metaclass conflict check if there is another metaclass.
                abc_meta = self.named_type_or_none('abc.ABCMeta', [])
                if abc_meta is not None:  # May be None in tests with incomplete lib-stub.
                    defn.info.metaclass_type = abc_meta
        if defn.info.metaclass_type is None:
            # Inconsistency may happen due to multiple baseclasses even in classes that
            # do not declare explicit metaclass, but it's harder to catch at this stage
            if defn.metaclass is not None:
                self.fail('Inconsistent metaclass structure for "%s"' % defn.name, defn)
        else:
            if defn.info.metaclass_type.type.has_base('enum.EnumMeta'):
                defn.info.is_enum = True
                if defn.type_vars:
                    self.fail("Enum class cannot be generic", defn)
def visit_import(self, i: Import) -> None:
self.statement = i
for id, as_id in i.ids:
# When implicit re-exporting is disabled, we have the same behavior as stubs.
use_implicit_reexport = not self.is_stub_file and self.options.implicit_reexport
if as_id is not None:
base_id = id
imported_id = as_id
module_public = use_implicit_reexport or id.split(".")[-1] == as_id
else:
base_id = id.split('.')[0]
imported_id = base_id
module_public = use_implicit_reexport
self.add_module_symbol(base_id, imported_id, context=i, module_public=module_public,
module_hidden=not module_public)
    def visit_import_from(self, imp: ImportFrom) -> None:
        """Analyze a 'from m import x [as y], ...' statement."""
        self.statement = imp
        module_id = self.correct_relative_import(imp)
        module = self.modules.get(module_id)
        for id, as_id in imp.names:
            fullname = module_id + '.' + id
            self.set_future_import_flags(fullname)
            if module is None:
                node = None
            elif module_id == self.cur_mod_id and fullname in self.modules:
                # Submodule takes precedence over definition in surround package, for
                # compatibility with runtime semantics in typical use cases. This
                # could more precisely model runtime semantics by taking into account
                # the line number beyond which the local definition should take
                # precedence, but doesn't seem to be important in most use cases.
                node = SymbolTableNode(GDEF, self.modules[fullname])
            else:
                if id == as_id == '__all__' and module_id in self.export_map:
                    self.all_exports[:] = self.export_map[module_id]
                node = module.names.get(id)
            missing_submodule = False
            imported_id = as_id or id
            # If the module does not contain a symbol with the name 'id', try
            # whether it refers to a submodule instead.
            if not node:
                mod = self.modules.get(fullname)
                if mod is not None:
                    kind = self.current_symbol_kind()
                    node = SymbolTableNode(kind, mod)
                elif fullname in self.missing_modules:
                    missing_submodule = True
            # If it is still not resolved, check for a module level __getattr__
            if (module and not node and (module.is_stub or self.options.python_version >= (3, 7))
                    and '__getattr__' in module.names):
                # We store the fullname of the original definition so that we can
                # detect whether two imported names refer to the same thing.
                fullname = module_id + '.' + id
                gvar = self.create_getattr_var(module.names['__getattr__'], imported_id, fullname)
                if gvar:
                    self.add_symbol(imported_id, gvar, imp)
                    continue
            # Modules imported in a stub file without using 'from Y import X as X' will
            # not get exported.
            # When implicit re-exporting is disabled, we have the same behavior as stubs.
            use_implicit_reexport = not self.is_stub_file and self.options.implicit_reexport
            module_public = use_implicit_reexport or (as_id is not None and id == as_id)
            if node and not node.module_hidden:
                self.process_imported_symbol(
                    node, module_id, id, imported_id, fullname, module_public, context=imp
                )
            elif module and not missing_submodule:
                # Target module exists but the imported name is missing or hidden.
                self.report_missing_module_attribute(
                    module_id, id, imported_id, module_public=module_public,
                    module_hidden=not module_public, context=imp
                )
            else:
                # Import of a missing (sub)module.
                self.add_unknown_imported_symbol(
                    imported_id, imp, target_name=fullname, module_public=module_public,
                    module_hidden=not module_public
                )
    def process_imported_symbol(self,
                                node: SymbolTableNode,
                                module_id: str,
                                id: str,
                                imported_id: str,
                                fullname: str,
                                module_public: bool,
                                context: ImportBase) -> None:
        """Bind a successfully resolved imported symbol in the current file."""
        module_hidden = not module_public and fullname not in self.modules
        if isinstance(node.node, PlaceholderNode):
            if self.final_iteration:
                # No more iterations will follow; report the unresolved attribute.
                self.report_missing_module_attribute(
                    module_id, id, imported_id, module_public=module_public,
                    module_hidden=module_hidden, context=context
                )
                return
            else:
                # This might become a type.
                self.mark_incomplete(imported_id, node.node,
                                     module_public=module_public,
                                     module_hidden=module_hidden,
                                     becomes_typeinfo=True)
        existing_symbol = self.globals.get(imported_id)
        if (existing_symbol and not isinstance(existing_symbol.node, PlaceholderNode) and
                not isinstance(node.node, PlaceholderNode)):
            # Import can redefine a variable. They get special treatment.
            if self.process_import_over_existing_name(
                    imported_id, existing_symbol, node, context):
                return
        if existing_symbol and isinstance(node.node, PlaceholderNode):
            # Imports are special, some redefinitions are allowed, so wait until
            # we know what is the new symbol node.
            return
        # NOTE: we take the original node even for final `Var`s. This is to support
        # a common pattern when constants are re-exported (same applies to import *).
        self.add_imported_symbol(imported_id, node, context,
                                 module_public=module_public,
                                 module_hidden=module_hidden)
def report_missing_module_attribute(
self, import_id: str, source_id: str, imported_id: str, module_public: bool,
module_hidden: bool, context: Node
) -> None:
# Missing attribute.
if self.is_incomplete_namespace(import_id):
# We don't know whether the name will be there, since the namespace
self.mark_incomplete(imported_id, context)
return
message = 'Module "{}" has no attribute "{}"'.format(import_id, source_id)
module = self.modules.get(import_id)
if module:
if not self.options.implicit_reexport and source_id in module.names.keys():
message = ('Module "{}" does not explicitly export attribute "{}"'
'; implicit reexport disabled'.format(import_id, source_id))
else:
alternatives = set(module.names.keys()).difference({source_id})
matches = best_matches(source_id, alternatives)[:3]
if matches:
suggestion = "; maybe {}?".format(pretty_seq(matches, "or"))
message += "{}".format(suggestion)
self.fail(message, context, code=codes.ATTR_DEFINED)
self.add_unknown_imported_symbol(
imported_id, context, target_name=None, module_public=module_public,
module_hidden=not module_public
)
if import_id == 'typing':
fullname = 'builtins.{}'.format(source_id.lower())
if (self.lookup_fully_qualified_or_none(fullname) is None and
fullname in SUGGESTED_TEST_FIXTURES):
# Yes. Generate a helpful note.
self.msg.add_fixture_note(fullname, context)
    def process_import_over_existing_name(self,
                                          imported_id: str, existing_symbol: SymbolTableNode,
                                          module_symbol: SymbolTableNode,
                                          import_node: ImportBase) -> bool:
        """Handle an import that redefines an existing name.

        If valid, synthesize a dummy assignment used later to type check the
        import, and return True. Return False if the import cannot be treated
        as a redefinition here.
        """
        if existing_symbol.node is module_symbol.node:
            # We added this symbol on previous iteration.
            return False
        if (existing_symbol.kind in (LDEF, GDEF, MDEF) and
                isinstance(existing_symbol.node, (Var, FuncDef, TypeInfo, Decorator, TypeAlias))):
            # This is a valid import over an existing definition in the file. Construct a dummy
            # assignment that we'll use to type check the import.
            lvalue = NameExpr(imported_id)
            lvalue.kind = existing_symbol.kind
            lvalue.node = existing_symbol.node
            rvalue = NameExpr(imported_id)
            rvalue.kind = module_symbol.kind
            rvalue.node = module_symbol.node
            if isinstance(rvalue.node, TypeAlias):
                # Suppress bogus errors from the dummy assignment if rvalue is an alias.
                rvalue.is_alias_rvalue = True
            assignment = AssignmentStmt([lvalue], rvalue)
            for node in assignment, lvalue, rvalue:
                node.set_line(import_node)
            import_node.assignments.append(assignment)
            return True
        return False
def correct_relative_import(self, node: Union[ImportFrom, ImportAll]) -> str:
import_id, ok = correct_relative_import(self.cur_mod_id, node.relative, node.id,
self.cur_mod_node.is_package_init_file())
if not ok:
self.fail("Relative import climbs too many namespaces", node)
return import_id
    def visit_import_all(self, i: ImportAll) -> None:
        """Analyze ``from m import *``: copy m's public names into this scope.

        Names starting with '_' are only copied when the module defines
        ``__all__``.  If the source module is unknown, nothing is added.
        """
        i_id = self.correct_relative_import(i)
        if i_id in self.modules:
            m = self.modules[i_id]
            if self.is_incomplete_namespace(i_id):
                # The target module may still gain names on a later iteration;
                # defer by marking the star import itself as incomplete.
                self.mark_incomplete('*', i)
            for name, node in m.names.items():
                fullname = i_id + '.' + name
                self.set_future_import_flags(fullname)
                if node is None:
                    continue
                # Only import public names (or those listed via __all__).
                if node.module_public and (not name.startswith('_') or '__all__' in m.names):
                    if isinstance(node.node, MypyFile):
                        # Star import of submodule from a package, add it as a dependency.
                        self.imports.add(node.node.fullname)
                    existing_symbol = self.lookup_current_scope(name)
                    if existing_symbol and not isinstance(node.node, PlaceholderNode):
                        # Import can redefine a variable. They get special treatment.
                        if self.process_import_over_existing_name(
                                name, existing_symbol, node, i):
                            continue
                    # In stub files, `from x import *` always reexports the symbols.
                    # In regular files, only if implicit reexports are enabled.
                    module_public = self.is_stub_file or self.options.implicit_reexport
                    self.add_imported_symbol(name, node, i,
                                             module_public=module_public,
                                             module_hidden=not module_public)
        else:
            # Don't add any dummy symbols for 'from x import *' if 'x' is unknown.
            pass
def visit_assignment_expr(self, s: AssignmentExpr) -> None:
s.value.accept(self)
self.analyze_lvalue(s.target, escape_comprehensions=True)
    def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
        """Analyze an assignment statement.

        First tries each "special form" interpretation in a fixed priority
        order (type alias, TypeVar, ParamSpec, NamedTuple, TypedDict, NewType,
        Enum); if none applies, analyzes it as a plain assignment.  The
        dispatch order is significant -- each analyzer only runs if all the
        earlier ones declined.
        """
        self.statement = s
        if self.analyze_identity_global_assignment(s):
            # 'X = X' at global scope is handled entirely by the helper.
            return
        tag = self.track_incomplete_refs()
        s.rvalue.accept(self)
        if self.found_incomplete_ref(tag) or self.should_wait_rhs(s.rvalue):
            # Make sure that if we skip the definition of some local names, they can't be
            # looked up as complete: mark them incomplete so references defer.
            for expr in names_modified_by_assignment(s):
                self.mark_incomplete(expr.name, expr)
            return
        special_form = False
        if self.check_and_set_up_type_alias(s):
            s.is_alias_def = True
            special_form = True
        elif self.process_typevar_declaration(s):
            special_form = True
        elif self.process_paramspec_declaration(s):
            special_form = True
        elif self.analyze_namedtuple_assign(s):
            special_form = True
        elif self.analyze_typeddict_assign(s):
            special_form = True
        elif self.newtype_analyzer.process_newtype_declaration(s):
            special_form = True
        elif self.analyze_enum_assign(s):
            special_form = True
        if special_form:
            self.record_special_form_lvalue(s)
            return
        # Plain assignment: analyze lvalues, Final/ClassVar markers, the
        # annotation, plugin hooks, and module aliasing, in that order.
        s.is_final_def = self.unwrap_final(s)
        self.analyze_lvalues(s)
        self.check_final_implicit_def(s)
        self.check_classvar(s)
        self.process_type_annotation(s)
        self.apply_dynamic_class_hook(s)
        self.store_final_status(s)
        if not s.type:
            self.process_module_assignment(s.lvalues, s.rvalue, s)
        self.process__all__(s)
        self.process__deletable__(s)
    def analyze_identity_global_assignment(self, s: AssignmentStmt) -> bool:
        """Special-case identity assignments ``X = X`` at module scope.

        Return True if the statement was fully handled here (including the
        case where analysis had to be deferred), False if normal assignment
        analysis should proceed.
        """
        if not isinstance(s.rvalue, NameExpr) or len(s.lvalues) != 1:
            return False
        lvalue = s.lvalues[0]
        if not isinstance(lvalue, NameExpr) or s.rvalue.name != lvalue.name:
            return False
        if self.type is not None or self.is_func_scope():
            # Only applies at module (global) scope.
            return False
        name = lvalue.name
        sym = self.lookup(name, s)
        if sym is None:
            if self.final_iteration:
                # Fall back to normal assignment analysis.
                return False
            else:
                self.defer()
                return True
        else:
            if sym.node is None:
                # Something special -- fall back to normal assignment analysis.
                return False
            if name not in self.globals:
                # The name is from builtins. Add an alias to the current module.
                self.add_symbol(name, sym.node, s)
            if not isinstance(sym.node, PlaceholderNode):
                # Bind both sides of 'X = X' directly to the resolved target.
                for node in s.rvalue, lvalue:
                    node.node = sym.node
                    node.kind = GDEF
                    node.fullname = sym.node.fullname
            return True
    def should_wait_rhs(self, rv: Expression) -> bool:
        """Should analysis of this rvalue be deferred to a later iteration?

        True when the rvalue refers (directly, via a subscript base, or via a
        call callee) to a placeholder symbol that is not yet known to become
        a class -- i.e. the name may still be resolved later.
        """
        if self.final_iteration:
            # No chance, nothing has changed.
            return False
        if isinstance(rv, NameExpr):
            n = self.lookup(rv.name, rv)
            if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
                return True
        elif isinstance(rv, MemberExpr):
            fname = get_member_expr_fullname(rv)
            if fname:
                n = self.lookup_qualified(fname, rv, suppress_errors=True)
                if n and isinstance(n.node, PlaceholderNode) and not n.node.becomes_typeinfo:
                    return True
        elif isinstance(rv, IndexExpr) and isinstance(rv.base, RefExpr):
            # Defer if the subscripted base itself is not ready.
            return self.should_wait_rhs(rv.base)
        elif isinstance(rv, CallExpr) and isinstance(rv.callee, RefExpr):
            # This is only relevant for builtin SCC where things like 'TypeVar'
            # may be not ready.
            return self.should_wait_rhs(rv.callee)
        return False
def can_be_type_alias(self, rv: Expression, allow_none: bool = False) -> bool:
if isinstance(rv, RefExpr) and self.is_type_ref(rv, bare=True):
return True
if isinstance(rv, IndexExpr) and self.is_type_ref(rv.base, bare=False):
return True
if self.is_none_alias(rv):
return True
if allow_none and isinstance(rv, NameExpr) and rv.fullname == 'builtins.None':
return True
if (isinstance(rv, OpExpr)
and rv.op == '|'
and self.can_be_type_alias(rv.left, allow_none=True)
and self.can_be_type_alias(rv.right, allow_none=True)):
return True
return False
    def is_type_ref(self, rv: Expression, bare: bool = False) -> bool:
        """Does this expression refer to something usable as a type?

        With bare=True, only references that are valid without a subscript
        are accepted (e.g. ``A = Tuple`` is allowed bare).  A TypeVar target
        is rejected with an error.  May report errors, so the caller's check
        order matters.
        """
        if not isinstance(rv, RefExpr):
            return False
        if isinstance(rv.node, TypeVarExpr):
            self.fail('Type variable "{}" is invalid as target for type alias'.format(
                rv.fullname), rv)
            return False
        if bare:
            # These three are valid even if bare, for example
            # A = Tuple is just equivalent to A = Tuple[Any, ...].
            valid_refs = {'typing.Any', 'typing.Tuple', 'typing.Callable'}
        else:
            valid_refs = type_constructors
        if isinstance(rv.node, TypeAlias) or rv.fullname in valid_refs:
            return True
        if isinstance(rv.node, TypeInfo):
            if bare:
                return True
            # Assignment color = Color['RED'] defines a variable, not an alias.
            return not rv.node.is_enum
        if isinstance(rv, NameExpr):
            n = self.lookup(rv.name, rv)
            if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
                return True
        elif isinstance(rv, MemberExpr):
            fname = get_member_expr_fullname(rv)
            if fname:
                # The r.h.s. for variable definitions may not be a type reference but just
                # an instance attribute, so suppress the errors.
                n = self.lookup_qualified(fname, rv, suppress_errors=True)
                if n and isinstance(n.node, PlaceholderNode) and n.node.becomes_typeinfo:
                    return True
        return False
def is_none_alias(self, node: Expression) -> bool:
if isinstance(node, CallExpr):
if (isinstance(node.callee, NameExpr) and len(node.args) == 1 and
isinstance(node.args[0], NameExpr)):
call = self.lookup_qualified(node.callee.name, node.callee)
arg = self.lookup_qualified(node.args[0].name, node.args[0])
if (call is not None and call.node and call.node.fullname == 'builtins.type' and
arg is not None and arg.node and arg.node.fullname == 'builtins.None'):
return True
return False
def record_special_form_lvalue(self, s: AssignmentStmt) -> None:
lvalue = s.lvalues[0]
assert isinstance(lvalue, NameExpr)
lvalue.is_special_form = True
if self.current_symbol_kind() == GDEF:
lvalue.fullname = self.qualified_name(lvalue.name)
lvalue.kind = self.current_symbol_kind()
def analyze_enum_assign(self, s: AssignmentStmt) -> bool:
if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, EnumCallExpr):
# Already analyzed enum -- nothing to do here.
return True
return self.enum_call_analyzer.process_enum_call(s, self.is_func_scope())
    def analyze_namedtuple_assign(self, s: AssignmentStmt) -> bool:
        """Check if s defines a namedtuple; analyze it and return True if so.

        Returns True for any namedtuple-looking definition, even one with
        errors or one that is not yet complete (then deferred).
        """
        if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, NamedTupleExpr):
            return True  # This is a valid and analyzed named tuple definition, nothing to do here.
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)):
            return False
        lvalue = s.lvalues[0]
        name = lvalue.name
        internal_name, info = self.named_tuple_analyzer.check_namedtuple(s.rvalue, name,
                                                                         self.is_func_scope())
        if internal_name is None:
            # Not a namedtuple at all.
            return False
        if isinstance(lvalue, MemberExpr):
            self.fail("NamedTuple type as an attribute is not supported", lvalue)
            return False
        if internal_name != name:
            # The name given inside namedtuple(...) must match the target name.
            self.fail('First argument to namedtuple() should be "{}", not "{}"'.format(
                name, internal_name), s.rvalue, code=codes.NAME_MATCH)
            return True
        # Yes, it's a valid namedtuple, but defer if it is not ready.
        if not info:
            self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
        return True
    def analyze_typeddict_assign(self, s: AssignmentStmt) -> bool:
        """Check if s defines a TypedDict; analyze it and return True if so.

        Like analyze_namedtuple_assign, may defer (marking the name
        incomplete) when the definition is not yet ready.
        """
        if isinstance(s.rvalue, CallExpr) and isinstance(s.rvalue.analyzed, TypedDictExpr):
            # Already analyzed on a previous iteration.
            return True
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], (NameExpr, MemberExpr)):
            return False
        lvalue = s.lvalues[0]
        name = lvalue.name
        is_typed_dict, info = self.typed_dict_analyzer.check_typeddict(s.rvalue, name,
                                                                       self.is_func_scope())
        if not is_typed_dict:
            return False
        if isinstance(lvalue, MemberExpr):
            self.fail("TypedDict type as attribute is not supported", lvalue)
            return False
        # Yes, it's a valid TypedDict, but defer if it is not ready.
        if not info:
            self.mark_incomplete(name, lvalue, becomes_typeinfo=True)
        return True
    def analyze_lvalues(self, s: AssignmentStmt) -> None:
        """Analyze all assignment targets of s.

        A target counts as explicitly typed if there is an annotation,
        except a bare ``Final`` (no type argument), which does not pin a type.
        """
        # We cannot use s.type, because analyze_simple_literal_type() will set it.
        explicit = s.unanalyzed_type is not None
        if self.is_final_type(s.unanalyzed_type):
            # We need to exclude bare Final.
            assert isinstance(s.unanalyzed_type, UnboundType)
            if not s.unanalyzed_type.args:
                explicit = False
        for lval in s.lvalues:
            self.analyze_lvalue(lval,
                                explicit_type=explicit,
                                is_final=s.is_final_def)
    def apply_dynamic_class_hook(self, s: AssignmentStmt) -> None:
        """Invoke a plugin's dynamic-class hook for ``Name = some_call(...)``.

        The hook is looked up by the fullname of the callee (either a direct
        reference or a ``base.method`` call).  Only single-target assignments
        of a call expression qualify.
        """
        if len(s.lvalues) > 1:
            return
        lval = s.lvalues[0]
        if not isinstance(lval, NameExpr) or not isinstance(s.rvalue, CallExpr):
            return
        call = s.rvalue
        fname = None
        if isinstance(call.callee, RefExpr):
            fname = call.callee.fullname
        # check if method call
        if fname is None and isinstance(call.callee, MemberExpr):
            callee_expr = call.callee.expr
            if isinstance(callee_expr, RefExpr) and callee_expr.fullname:
                method_name = call.callee.name
                fname = callee_expr.fullname + '.' + method_name
        if fname:
            hook = self.plugin.get_dynamic_class_hook(fname)
            if hook:
                hook(DynamicClassDefContext(call, lval.name, self))
    def unwrap_final(self, s: AssignmentStmt) -> bool:
        """Strip ``Final[...]`` from the annotation of s, if present.

        Rewrites ``x: Final[t] = v`` into ``x: t = v`` (and bare ``x: Final = v``
        into an unannotated assignment), reporting errors for invalid uses.
        Return True if a Final declaration was present and is valid enough to
        keep s.is_final_def set.
        """
        if not s.unanalyzed_type or not self.is_final_type(s.unanalyzed_type):
            return False
        assert isinstance(s.unanalyzed_type, UnboundType)
        if len(s.unanalyzed_type.args) > 1:
            self.fail("Final[...] takes at most one type argument", s.unanalyzed_type)
        invalid_bare_final = False
        if not s.unanalyzed_type.args:
            s.type = None
            if isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs:
                invalid_bare_final = True
                self.fail("Type in Final[...] can only be omitted if there is an initializer", s)
        else:
            s.type = s.unanalyzed_type.args[0]
        if s.type is not None and self.is_classvar(s.type):
            self.fail("Variable should not be annotated with both ClassVar and Final", s)
            return False
        if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], RefExpr):
            self.fail("Invalid final declaration", s)
            return False
        lval = s.lvalues[0]
        assert isinstance(lval, RefExpr)
        # Reset inferred status if it was set due to simple literal rvalue on previous iteration.
        # TODO: this is a best-effort quick fix, we should avoid the need to manually sync this,
        # see https://github.com/python/mypy/issues/6458.
        if lval.is_new_def:
            lval.is_inferred_def = s.type is None
        if self.loop_depth > 0:
            self.fail("Cannot use Final inside a loop", s)
        if self.type and self.type.is_protocol:
            self.msg.protocol_members_cant_be_final(s)
        if (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs and
                not self.is_stub_file and not self.is_class_scope()):
            # A Final without a value is only allowed in stubs and class bodies.
            if not invalid_bare_final:  # Skip extra error messages.
                self.msg.final_without_value(s)
        return True
    def check_final_implicit_def(self, s: AssignmentStmt) -> None:
        """Validate where a Final attribute may be declared.

        Final attributes can be declared on ``self`` only inside ``__init__``
        (or directly in a class body); otherwise the Final flag is dropped
        with an error.
        """
        if not s.is_final_def:
            return
        lval = s.lvalues[0]
        assert isinstance(lval, RefExpr)
        if isinstance(lval, MemberExpr):
            if not self.is_self_member_ref(lval):
                self.fail("Final can be only applied to a name or an attribute on self", s)
                s.is_final_def = False
                return
            else:
                assert self.function_stack
                if self.function_stack[-1].name != '__init__':
                    self.fail("Can only declare a final attribute in class body or __init__", s)
                    s.is_final_def = False
                    return
    def store_final_status(self, s: AssignmentStmt) -> None:
        """Record the Final status of an assignment on the target Var node(s)."""
        if s.is_final_def:
            if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
                node = s.lvalues[0].node
                if isinstance(node, Var):
                    node.is_final = True
                    node.final_value = self.unbox_literal(s.rvalue)
                    if (self.is_class_scope() and
                            (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
                        # Final declared in class body without a value; it must
                        # be initialized later (tracked via final_unset_in_class).
                        node.final_unset_in_class = True
        else:
            # Special case: deferred initialization of a final attribute in __init__.
            # In this case we just pretend this is a valid final definition to suppress
            # errors about assigning to final attribute.
            for lval in self.flatten_lvalues(s.lvalues):
                if isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
                    assert self.type, "Self member outside a class"
                    cur_node = self.type.names.get(lval.name, None)
                    if cur_node and isinstance(cur_node.node, Var) and cur_node.node.is_final:
                        assert self.function_stack
                        top_function = self.function_stack[-1]
                        if (top_function.name == '__init__' and
                                cur_node.node.final_unset_in_class and
                                not cur_node.node.final_set_in_init and
                                not (isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs)):
                            cur_node.node.final_set_in_init = True
                            s.is_final_def = True
def flatten_lvalues(self, lvalues: List[Expression]) -> List[Expression]:
res: List[Expression] = []
for lv in lvalues:
if isinstance(lv, (TupleExpr, ListExpr)):
res.extend(self.flatten_lvalues(lv.items))
else:
res.append(lv)
return res
def unbox_literal(self, e: Expression) -> Optional[Union[int, float, bool, str]]:
if isinstance(e, (IntExpr, FloatExpr, StrExpr)):
return e.value
elif isinstance(e, NameExpr) and e.name in ('True', 'False'):
return True if e.name == 'True' else False
return None
    def process_type_annotation(self, s: AssignmentStmt) -> None:
        """Analyze the type annotation of s (if any) and store declared types.

        Without an annotation, may infer a literal type for simple literal
        rvalues; also enforces that protocol members are explicitly typed.
        """
        if s.type:
            lvalue = s.lvalues[-1]
            allow_tuple_literal = isinstance(lvalue, TupleExpr)
            analyzed = self.anal_type(s.type, allow_tuple_literal=allow_tuple_literal)
            # Don't store not ready types (including placeholders).
            if analyzed is None or has_placeholder(analyzed):
                return
            s.type = analyzed
            if (self.type and self.type.is_protocol and isinstance(lvalue, NameExpr) and
                    isinstance(s.rvalue, TempNode) and s.rvalue.no_rhs):
                # Annotated protocol member without a value is abstract.
                if isinstance(lvalue.node, Var):
                    lvalue.node.is_abstract_var = True
        else:
            if (self.type and self.type.is_protocol and
                    self.is_annotated_protocol_member(s) and not self.is_func_scope()):
                self.fail('All protocol members must have explicitly declared types', s)
            # Set the type if the rvalue is a simple literal (even if the above error occurred).
            if len(s.lvalues) == 1 and isinstance(s.lvalues[0], RefExpr):
                if s.lvalues[0].is_inferred_def:
                    s.type = self.analyze_simple_literal_type(s.rvalue, s.is_final_def)
        if s.type:
            # Store type into nodes.
            for lvalue in s.lvalues:
                self.store_declared_types(lvalue, s.type)
def is_annotated_protocol_member(self, s: AssignmentStmt) -> bool:
return any(
(
isinstance(lv, NameExpr)
and lv.name != '__slots__'
and lv.is_inferred_def
)
for lv in s.lvalues
)
    def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> Optional[Type]:
        """Infer a type for a simple literal rvalue (int/str/bytes/unicode/float).

        For Final definitions the inferred instance carries the literal as its
        last known value; otherwise the plain instance type is returned.
        Returns None when inference does not apply.
        """
        if self.options.semantic_analysis_only or self.function_stack:
            # This is mostly to avoid breaking unit tests.
            # Also skip inside a function; this is to avoid confusing
            # the code that handles dead code due to isinstance()
            # inside type variables with value restrictions (like
            # AnyStr).
            return None
        if isinstance(rvalue, FloatExpr):
            return self.named_type_or_none('builtins.float')
        value: Optional[LiteralValue] = None
        type_name: Optional[str] = None
        if isinstance(rvalue, IntExpr):
            value, type_name = rvalue.value, 'builtins.int'
        if isinstance(rvalue, StrExpr):
            value, type_name = rvalue.value, 'builtins.str'
        if isinstance(rvalue, BytesExpr):
            value, type_name = rvalue.value, 'builtins.bytes'
        if isinstance(rvalue, UnicodeExpr):
            value, type_name = rvalue.value, 'builtins.unicode'
        if type_name is not None:
            assert value is not None
            typ = self.named_type_or_none(type_name)
            if typ and is_final:
                # Remember the exact literal value for Final definitions.
                return typ.copy_modified(last_known_value=LiteralType(
                    value=value,
                    fallback=typ,
                    line=typ.line,
                    column=typ.column,
                ))
            return typ
        return None
    def analyze_alias(self, rvalue: Expression,
                      allow_placeholder: bool = False) -> Tuple[Optional[Type], List[str],
                                                                Set[str], List[str]]:
        """Analyze an rvalue as a candidate type alias target.

        Returns a 4-tuple: the analyzed type (or None if it is not a valid
        alias target), the short names of type variables it uses, the set of
        names the alias depends on, and the fully qualified type variable
        names.
        """
        dynamic = bool(self.function_stack and self.function_stack[-1].is_dynamic())
        global_scope = not self.type and not self.function_stack
        res = analyze_type_alias(rvalue,
                                 self,
                                 self.tvar_scope,
                                 self.plugin,
                                 self.options,
                                 self.is_typeshed_stub_file,
                                 allow_new_syntax=self.is_stub_file,
                                 allow_placeholder=allow_placeholder,
                                 in_dynamic_func=dynamic,
                                 global_scope=global_scope)
        typ: Optional[Type] = None
        if res:
            typ, depends_on = res
            # Collect the type variables appearing in the alias target.
            found_type_vars = typ.accept(TypeVarLikeQuery(self.lookup_qualified, self.tvar_scope))
            alias_tvars = [name for (name, node) in found_type_vars]
            qualified_tvars = [node.fullname for (name, node) in found_type_vars]
        else:
            alias_tvars = []
            depends_on = set()
            qualified_tvars = []
        return typ, alias_tvars, depends_on, qualified_tvars
def check_and_set_up_type_alias(self, s: AssignmentStmt) -> bool:
lvalue = s.lvalues[0]
if len(s.lvalues) > 1 or not isinstance(lvalue, NameExpr):
# First rule: Only simple assignments like Alias = ... create aliases.
return False
if s.unanalyzed_type is not None:
# Second rule: Explicit type (cls: Type[A] = A) always creates variable, not alias.
return False
existing = self.current_symbol_table().get(lvalue.name)
# Third rule: type aliases can't be re-defined. For example:
= float # Error!
# Don't create an alias in these cases:
if (existing
and (isinstance(existing.node, Var)
or (isinstance(existing.node, TypeAlias)
and not s.is_alias_def)
or (isinstance(existing.node, PlaceholderNode)
and existing.node.node.line < s.line))):
if isinstance(existing.node, TypeAlias) and not s.is_alias_def:
self.fail('Cannot assign multiple types to name "{}"'
' without an explicit "Type[...]" annotation'
.format(lvalue.name), lvalue)
return False
non_global_scope = self.type or self.is_func_scope()
if isinstance(s.rvalue, RefExpr) and non_global_scope:
False
rvalue = s.rvalue
if not self.can_be_type_alias(rvalue):
return False
if existing and not isinstance(existing.node, (PlaceholderNode, TypeAlias)):
return False
res: Optional[Type] = None
if self.is_none_alias(rvalue):
res = NoneType()
alias_tvars, depends_on, qualified_tvars = \
[], set(), []
else:
tag = self.track_incomplete_refs()
res, alias_tvars, depends_on, qualified_tvars = \
self.analyze_alias(rvalue, allow_placeholder=True)
if not res:
return False
if self.found_incomplete_ref(tag) or has_placeholder(res):
self.mark_incomplete(lvalue.name, rvalue, becomes_typeinfo=True)
return True
self.add_type_alias_deps(depends_on)
self.add_type_alias_deps(qualified_tvars)
check_for_explicit_any(res, self.options, self.is_typeshed_stub_file, self.msg,
context=s)
if not has_placeholder(res):
res = make_any_non_explicit(res)
no_args = isinstance(res, Instance) and not res.args
fix_instance_types(res, self.fail, self.note, self.options.python_version)
# the function, since the symbol table will no longer
# exist. Work around by expanding them eagerly when used.
eager = self.is_func_scope()
alias_node = TypeAlias(res,
self.qualified_name(lvalue.name),
s.line,
s.column,
alias_tvars=alias_tvars,
no_args=no_args,
eager=eager)
if isinstance(s.rvalue, (IndexExpr, CallExpr)): # CallExpr is for `void = type(None)`
s.rvalue.analyzed = TypeAliasExpr(alias_node)
s.rvalue.analyzed.line = s.line
# we use the column from resulting target, to get better location for errors
s.rvalue.analyzed.column = res.column
elif isinstance(s.rvalue, RefExpr):
s.rvalue.is_alias_rvalue = True
if existing:
# An alias gets updated.
updated = False
if isinstance(existing.node, TypeAlias):
if existing.node.target != res:
# Copy expansion to the existing alias, this matches how we update base classes
# for a TypeInfo _in place_ if there are nested placeholders.
existing.node.target = res
existing.node.alias_tvars = alias_tvars
existing.node.no_args = no_args
updated = True
else:
# Otherwise just replace existing placeholder with type alias.
existing.node = alias_node
updated = True
if updated:
if self.final_iteration:
self.cannot_resolve_name(lvalue.name, 'name', s)
return True
else:
self.progress = True
# We need to defer so that this change can get propagated to base classes.
self.defer(s)
else:
self.add_symbol(lvalue.name, alias_node, s)
if isinstance(rvalue, RefExpr) and isinstance(rvalue.node, TypeAlias):
alias_node.normalized = rvalue.node.normalized
return True
    def analyze_lvalue(self,
                       lval: Lvalue,
                       nested: bool = False,
                       explicit_type: bool = False,
                       is_final: bool = False,
                       escape_comprehensions: bool = False) -> None:
        """Analyze an lvalue or assignment target.

        Args:
            lval: the target being assigned to
            nested: true if the target appears inside a tuple/list target
            explicit_type: the assignment carries an explicit annotation
            is_final: the assignment is a Final declaration
            escape_comprehensions: bind the name in the enclosing function
                scope rather than the comprehension scope (walrus targets)
        """
        if escape_comprehensions:
            assert isinstance(lval, NameExpr), "assignment expression target must be NameExpr"
        if isinstance(lval, NameExpr):
            self.analyze_name_lvalue(lval, explicit_type, is_final, escape_comprehensions)
        elif isinstance(lval, MemberExpr):
            self.analyze_member_lvalue(lval, explicit_type, is_final)
            if explicit_type and not self.is_self_member_ref(lval):
                self.fail('Type cannot be declared in assignment to non-self '
                          'attribute', lval)
        elif isinstance(lval, IndexExpr):
            if explicit_type:
                self.fail('Unexpected type declaration', lval)
            lval.accept(self)
        elif isinstance(lval, TupleExpr):
            self.analyze_tuple_or_list_lvalue(lval, explicit_type)
        elif isinstance(lval, StarExpr):
            # A star target is only valid nested inside a tuple/list target.
            if nested:
                self.analyze_lvalue(lval.expr, nested, explicit_type)
            else:
                self.fail('Starred assignment target must be in a list or tuple', lval)
        else:
            self.fail('Invalid assignment target', lval)
    def analyze_name_lvalue(self,
                            lvalue: NameExpr,
                            explicit_type: bool,
                            is_final: bool,
                            escape_comprehensions: bool) -> None:
        """Analyze a plain name assignment target.

        Either defines a new variable in the current scope and binds the name
        expression to it, or rebinds the name to an existing definition.
        """
        if lvalue.node:
            # This has been bound already in a previous iteration.
            return
        name = lvalue.name
        if self.is_alias_for_final_name(name):
            # Assigning over a name whose earlier (mangled) binding is Final.
            if is_final:
                self.fail("Cannot redefine an existing name as final", lvalue)
            else:
                self.msg.cant_assign_to_final(name, self.type is not None, lvalue)
        kind = self.current_symbol_kind()
        names = self.current_symbol_table()
        existing = names.get(name)
        outer = self.is_global_or_nonlocal(name)
        if (not existing or isinstance(existing.node, PlaceholderNode)) and not outer:
            # Define new variable.
            var = self.make_name_lvalue_var(lvalue, kind, not explicit_type)
            added = self.add_symbol(name, var, lvalue, escape_comprehensions=escape_comprehensions)
            # Only bind expression if we successfully added name to symbol table.
            if added:
                lvalue.is_new_def = True
                lvalue.is_inferred_def = True
                lvalue.kind = kind
                lvalue.node = var
                if kind == GDEF:
                    lvalue.fullname = var._fullname
                else:
                    lvalue.fullname = lvalue.name
                if self.is_func_scope():
                    if unmangle(name) == '_':
                        # Special case for assignment to local named '_': always infer 'Any'.
                        typ = AnyType(TypeOfAny.special_form)
                        self.store_declared_types(lvalue, typ)
            if is_final and self.is_final_redefinition(kind, name):
                self.fail("Cannot redefine an existing name as final", lvalue)
        else:
            self.make_name_lvalue_point_to_existing_def(lvalue, explicit_type, is_final)
def is_final_redefinition(self, kind: int, name: str) -> bool:
if kind == GDEF:
return self.is_mangled_global(name) and not self.is_initial_mangled_global(name)
elif kind == MDEF and self.type:
return unmangle(name) + "'" in self.type.names
return False
    def is_alias_for_final_name(self, name: str) -> bool:
        """Does this (possibly mangled) name shadow an earlier Final binding?

        Checks the appropriate scope (function locals, class names, or
        globals) for a previous binding of the unmangled name that is Final.
        """
        if self.is_func_scope():
            if not name.endswith("'"):
                # Not a mangled name -- can't be an alias
                return False
            name = unmangle(name)
            assert self.locals[-1] is not None, "No locals at function scope"
            existing = self.locals[-1].get(name)
            return existing is not None and is_final_node(existing.node)
        elif self.type is not None:
            orig_name = unmangle(name) + "'"
            if name == orig_name:
                return False
            existing = self.type.names.get(orig_name)
            return existing is not None and is_final_node(existing.node)
        else:
            orig_name = unmangle(name) + "'"
            if name == orig_name:
                return False
            existing = self.globals.get(orig_name)
            return existing is not None and is_final_node(existing.node)
def make_name_lvalue_var(self, lvalue: NameExpr, kind: int, inferred: bool) -> Var:
v = Var(lvalue.name)
v.set_line(lvalue)
v.is_inferred = inferred
if kind == MDEF:
assert self.type is not None
v.info = self.type
v.is_initialized_in_class = True
if kind != LDEF:
v._fullname = self.qualified_name(lvalue.name)
else:
v._fullname = lvalue.name
v.is_ready = False
return v
    def make_name_lvalue_point_to_existing_def(
            self,
            lval: NameExpr,
            explicit_type: bool,
            is_final: bool) -> None:
        """Bind an assignment target name to an existing definition.

        An explicit annotation on a redefinition is an error ("already
        defined"); so is declaring the redefinition Final.
        """
        if is_final:
            # Redefining an existing name with final is always an error.
            self.fail("Cannot redefine an existing name as final", lval)
        original_def = self.lookup(lval.name, lval, suppress_errors=True)
        if original_def is None and self.type and not self.is_func_scope():
            # Fall back to a lookup on the class (including inherited names).
            original_def = self.type.get(lval.name)
        if explicit_type:
            # Don't re-bind if there is an explicit type.
            self.name_already_defined(lval.name, lval, original_def)
        else:
            # Bind to an existing name.
            if original_def:
                self.bind_name_expr(lval, original_def)
            else:
                self.name_not_defined(lval.name, lval)
            self.check_lvalue_validity(lval.node, lval)
def analyze_tuple_or_list_lvalue(self, lval: TupleExpr,
explicit_type: bool = False) -> None:
items = lval.items
star_exprs = [item for item in items if isinstance(item, StarExpr)]
if len(star_exprs) > 1:
self.fail('Two starred expressions in assignment', lval)
else:
if len(star_exprs) == 1:
star_exprs[0].valid = True
for i in items:
self.analyze_lvalue(i, nested=True, explicit_type=explicit_type)
    def analyze_member_lvalue(self, lval: MemberExpr, explicit_type: bool, is_final: bool) -> None:
        """Analyze an assignment target that is a member expression.

        For ``self.x`` targets this may implicitly define a new class
        attribute (a Var added to the class symbol table).
        """
        if lval.node:
            # This has been bound already in a previous iteration.
            return
        lval.accept(self)
        if self.is_self_member_ref(lval):
            assert self.type, "Self member outside a class"
            cur_node = self.type.names.get(lval.name)
            node = self.type.get(lval.name)
            if cur_node and is_final:
                # Overrides will be checked in type checker.
                self.fail("Cannot redefine an existing name as final", lval)
            # On first encounter with this definition, if this attribute was defined before
            # with an inferred type and it's marked with an explicit type now, give an error.
            if (not lval.node and cur_node and isinstance(cur_node.node, Var) and
                    cur_node.node.is_inferred and explicit_type):
                self.attribute_already_defined(lval.name, lval, cur_node)
            # Implicitly define the attribute when it is new, abstract, or
            # newly given an explicit/Final type in this class.
            if (node is None
                    or (isinstance(node.node, Var) and node.node.is_abstract_var)
                    or (cur_node is None and (explicit_type or is_final))):
                if self.type.is_protocol and node is None:
                    self.fail("Protocol members cannot be defined via assignment to self", lval)
                else:
                    lval.is_new_def = True
                    lval.is_inferred_def = True
                    v = Var(lval.name)
                    v.set_line(lval)
                    v._fullname = self.qualified_name(lval.name)
                    v.info = self.type
                    v.is_ready = False
                    v.explicit_self_type = explicit_type or is_final
                    lval.def_var = v
                    lval.node = v
                    # TODO: should we also set lval.kind = MDEF?
                    self.type.names[lval.name] = SymbolTableNode(MDEF, v, implicit=True)
        self.check_lvalue_validity(lval.node, lval)
def is_self_member_ref(self, memberexpr: MemberExpr) -> bool:
if not isinstance(memberexpr.expr, NameExpr):
return False
node = memberexpr.expr.node
return isinstance(node, Var) and node.is_self
def check_lvalue_validity(self, node: Union[Expression, SymbolNode, None],
ctx: Context) -> None:
if isinstance(node, TypeVarExpr):
self.fail('Invalid assignment target', ctx)
elif isinstance(node, TypeInfo):
self.fail(message_registry.CANNOT_ASSIGN_TO_TYPE, ctx)
    def store_declared_types(self, lvalue: Lvalue, typ: Type) -> None:
        """Store a declared type on the Var node(s) behind an lvalue.

        Distributes a tuple type across a tuple target, item by item; reports
        errors for shape mismatches and misused star types.
        """
        if isinstance(typ, StarType) and not isinstance(lvalue, StarExpr):
            self.fail('Star type only allowed for starred expressions', lvalue)
        if isinstance(lvalue, RefExpr):
            # The declared type overrides any inferred status.
            lvalue.is_inferred_def = False
            if isinstance(lvalue.node, Var):
                var = lvalue.node
                var.type = typ
                var.is_ready = True
        elif isinstance(lvalue, TupleExpr):
            typ = get_proper_type(typ)
            if isinstance(typ, TupleType):
                if len(lvalue.items) != len(typ.items):
                    self.fail('Incompatible number of tuple items', lvalue)
                    return
                for item, itemtype in zip(lvalue.items, typ.items):
                    self.store_declared_types(item, itemtype)
            else:
                self.fail('Tuple type expected for multiple variables',
                          lvalue)
        elif isinstance(lvalue, StarExpr):
            # Historical behavior for the old parser
            if isinstance(typ, StarType):
                self.store_declared_types(lvalue.expr, typ.type)
            else:
                self.store_declared_types(lvalue.expr, typ)
        else:
            # This has been flagged elsewhere as an error, so just ignore here.
            pass
    def process_typevar_declaration(self, s: AssignmentStmt) -> bool:
        """Check if s declares a TypeVar; if so, store it in the symbol table.

        Return True if this looks like a type variable declaration (possibly
        with errors), otherwise False.
        """
        call = self.get_typevarlike_declaration(s, ("typing.TypeVar",))
        if not call:
            return False
        lvalue = s.lvalues[0]
        assert isinstance(lvalue, NameExpr)
        if s.type:
            self.fail("Cannot declare the type of a type variable", s)
            return False
        name = lvalue.name
        if not self.check_typevarlike_name(call, name, s):
            return False
        # Constraining types
        n_values = call.arg_kinds[1:].count(ARG_POS)
        values = self.analyze_value_types(call.args[1:1 + n_values])
        # Remaining (keyword) arguments: covariant/contravariant/bound/etc.
        res = self.process_typevar_parameters(call.args[1 + n_values:],
                                              call.arg_names[1 + n_values:],
                                              call.arg_kinds[1 + n_values:],
                                              n_values,
                                              s)
        if res is None:
            return False
        variance, upper_bound = res
        existing = self.current_symbol_table().get(name)
        if existing and not (isinstance(existing.node, PlaceholderNode) or
                             # Also give error for another type variable with the same name.
                             (isinstance(existing.node, TypeVarExpr) and
                              existing.node is call.analyzed)):
            self.fail('Cannot redefine "%s" as a type variable' % name, s)
            return False
        if self.options.disallow_any_unimported:
            for idx, constraint in enumerate(values, start=1):
                if has_any_from_unimported_type(constraint):
                    prefix = "Constraint {}".format(idx)
                    self.msg.unimported_type_becomes_any(prefix, constraint, s)
            if has_any_from_unimported_type(upper_bound):
                prefix = "Upper bound of type variable"
                self.msg.unimported_type_becomes_any(prefix, upper_bound, s)
        for t in values + [upper_bound]:
            check_for_explicit_any(t, self.options, self.is_typeshed_stub_file, self.msg,
                                   context=s)
        # mypyc suppresses making copies of a function to check each
        # possible type, so set the upper bound to Any to prevent that
        # from causing errors.
        if values and self.options.mypyc:
            upper_bound = AnyType(TypeOfAny.implementation_artifact)
        # Yes, it's a valid type variable definition! Add it to the symbol table.
        if not call.analyzed:
            type_var = TypeVarExpr(name, self.qualified_name(name),
                                   values, upper_bound, variance)
            type_var.line = call.line
            call.analyzed = type_var
        else:
            assert isinstance(call.analyzed, TypeVarExpr)
            # On later iterations, update the existing node in place and
            # record progress if anything changed.
            if call.analyzed.values != values or call.analyzed.upper_bound != upper_bound:
                self.progress = True
            call.analyzed.upper_bound = upper_bound
            call.analyzed.values = values
        self.add_symbol(name, call.analyzed, s)
        return True
    def check_typevarlike_name(self, call: CallExpr, name: str, context: Context) -> bool:
        """Check that the string argument of TypeVar()/ParamSpec() matches the
        variable name it is assigned to.  Return False (with an error) if not.
        """
        name = unmangle(name)
        assert isinstance(call.callee, RefExpr)
        typevarlike_type = (
            call.callee.name if isinstance(call.callee, NameExpr) else call.callee.fullname
        )
        if len(call.args) < 1:
            self.fail("Too few arguments for {}()".format(typevarlike_type), context)
            return False
        if (not isinstance(call.args[0], (StrExpr, BytesExpr, UnicodeExpr))
                or not call.arg_kinds[0] == ARG_POS):
            self.fail("{}() expects a string literal as first argument".format(typevarlike_type),
                      context)
            return False
        elif call.args[0].value != name:
            msg = 'String argument 1 "{}" to {}(...) does not match variable name "{}"'
            self.fail(msg.format(call.args[0].value, typevarlike_type, name), context)
            return False
        return True
def get_typevarlike_declaration(self, s: AssignmentStmt,
typevarlike_types: Tuple[str, ...]) -> Optional[CallExpr]:
if len(s.lvalues) != 1 or not isinstance(s.lvalues[0], NameExpr):
return None
if not isinstance(s.rvalue, CallExpr):
return None
call = s.rvalue
callee = call.callee
if not isinstance(callee, RefExpr):
return None
if callee.fullname not in typevarlike_types:
return None
return call
    def process_typevar_parameters(self, args: List[Expression],
                                   names: List[Optional[str]],
                                   kinds: List[ArgKind],
                                   num_values: int,
                                   context: Context) -> Optional[Tuple[int, Type]]:
        """Analyze the keyword arguments of a TypeVar(...) call.

        Returns a (variance, upper bound) tuple, or None if any argument
        is invalid (an error has been reported in that case).
        """
        has_values = (num_values > 0)
        covariant = False
        contravariant = False
        upper_bound: Type = self.object_type()
        for param_value, param_name, param_kind in zip(args, names, kinds):
            # Everything after the positional value constraints must be keyword-only.
            if not param_kind == ARG_NAMED:
                self.fail("Unexpected argument to TypeVar()", context)
                return None
            if param_name == 'covariant':
                if isinstance(param_value, NameExpr):
                    if param_value.name == 'True':
                        covariant = True
                    else:
                        self.fail("TypeVar 'covariant' may only be 'True'", context)
                        return None
                else:
                    self.fail("TypeVar 'covariant' may only be 'True'", context)
                    return None
            elif param_name == 'contravariant':
                if isinstance(param_value, NameExpr):
                    if param_value.name == 'True':
                        contravariant = True
                    else:
                        self.fail("TypeVar 'contravariant' may only be 'True'", context)
                        return None
                else:
                    self.fail("TypeVar 'contravariant' may only be 'True'", context)
                    return None
            elif param_name == 'bound':
                if has_values:
                    self.fail("TypeVar cannot have both values and an upper bound", context)
                    return None
                try:
                    # Suppress the generic invalid-type error so the custom
                    # 'TypeVar "bound" must be a type' message below is used.
                    analyzed = self.expr_to_analyzed_type(param_value,
                                                          allow_placeholder=True,
                                                          report_invalid_types=False)
                    if analyzed is None:
                        # The bound is not ready yet; use a placeholder for now
                        # (allow_placeholder=True above permits this outcome).
                        analyzed = PlaceholderType(None, [], context.line)
                    upper_bound = get_proper_type(analyzed)
                    if isinstance(upper_bound, AnyType) and upper_bound.is_from_error:
                        self.fail('TypeVar "bound" must be a type', param_value)
                        # Note: processing continues with the Any upper bound.
                except TypeTranslationError:
                    self.fail('TypeVar "bound" must be a type', param_value)
                    return None
            elif param_name == 'values':
                # Obsolete values=(...) syntax; point the user at the supported form.
                self.fail('TypeVar "values" argument not supported', context)
                self.fail("Use TypeVar('T', t, ...) instead of TypeVar('T', values=(t, ...))",
                          context)
                return None
            else:
                self.fail('Unexpected argument to TypeVar(): "{}"'.format(param_name), context)
                return None
        if covariant and contravariant:
            self.fail("TypeVar cannot be both covariant and contravariant", context)
            return None
        elif num_values == 1:
            self.fail("TypeVar cannot have only a single constraint", context)
            return None
        elif covariant:
            variance = COVARIANT
        elif contravariant:
            variance = CONTRAVARIANT
        else:
            variance = INVARIANT
        return variance, upper_bound
    def process_paramspec_declaration(self, s: AssignmentStmt) -> bool:
        """Check if s declares a ParamSpec; if yes, store it in the symbol table.

        Return True if this was a ParamSpec declaration, otherwise False.
        """
        # ParamSpec support is gated behind the work-in-progress PEP 612 flag.
        if not self.options.wip_pep_612:
            return False
        call = self.get_typevarlike_declaration(
            s, ("typing_extensions.ParamSpec", "typing.ParamSpec")
        )
        if not call:
            return False
        lvalue = s.lvalues[0]
        assert isinstance(lvalue, NameExpr)
        if s.type:
            self.fail("Cannot declare the type of a parameter specification", s)
            return False
        name = lvalue.name
        if not self.check_typevarlike_name(call, name, s):
            return False
        if not call.analyzed:
            # First pass over this declaration: build the ParamSpec expression node.
            paramspec_var = ParamSpecExpr(
                name, self.qualified_name(name), self.object_type(), INVARIANT
            )
            paramspec_var.line = call.line
            call.analyzed = paramspec_var
        else:
            # Re-analysis: reuse the node created earlier.
            assert isinstance(call.analyzed, ParamSpecExpr)
        self.add_symbol(name, call.analyzed, s)
        return True
    def basic_new_typeinfo(self, name: str,
                           basetype_or_fallback: Instance,
                           line: int) -> TypeInfo:
        """Create a minimal TypeInfo for a synthesized class with one base."""
        if self.is_func_scope() and not self.type and '@' not in name:
            # Function-local classes get the line number appended to make
            # the name unique within the module.
            name += '@' + str(line)
        class_def = ClassDef(name, Block([]))
        if self.is_func_scope() and not self.type:
            # Prefix function-local class names with the module id so the
            # full name is qualified even inside a function.
            class_def.fullname = self.cur_mod_id + '.' + self.qualified_name(name)
        else:
            class_def.fullname = self.qualified_name(name)
        info = TypeInfo(SymbolTable(), class_def, self.cur_mod_id)
        class_def.info = info
        mro = basetype_or_fallback.type.mro
        if not mro:
            # Probably an error somewhere else; fall back to [base, object]
            # so later code can rely on a non-empty MRO.
            mro = [basetype_or_fallback.type, self.object_type().type]
        info.mro = [info] + mro
        info.bases = [basetype_or_fallback]
        return info
def analyze_value_types(self, items: List[Expression]) -> List[Type]:
result: List[Type] = []
for node in items:
try:
analyzed = self.anal_type(self.expr_to_unanalyzed_type(node),
allow_placeholder=True)
if analyzed is None:
analyzed = PlaceholderType(None, [], node.line)
result.append(analyzed)
except TypeTranslationError:
self.fail('Type expected', node)
result.append(AnyType(TypeOfAny.from_error))
return result
def check_classvar(self, s: AssignmentStmt) -> None:
lvalue = s.lvalues[0]
if len(s.lvalues) != 1 or not isinstance(lvalue, RefExpr):
return
if not s.type or not self.is_classvar(s.type):
return
if self.is_class_scope() and isinstance(lvalue, NameExpr):
node = lvalue.node
if isinstance(node, Var):
node.is_classvar = True
elif not isinstance(lvalue, MemberExpr) or self.is_self_member_ref(lvalue):
self.fail_invalid_classvar(lvalue)
def is_classvar(self, typ: Type) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname == 'typing.ClassVar'
def is_final_type(self, typ: Optional[Type]) -> bool:
if not isinstance(typ, UnboundType):
return False
sym = self.lookup_qualified(typ.name, typ)
if not sym or not sym.node:
return False
return sym.node.fullname in ('typing.Final', 'typing_extensions.Final')
def fail_invalid_classvar(self, context: Context) -> None:
self.fail('ClassVar can only be used for assignments in class body', context)
    def process_module_assignment(self, lvals: List[Lvalue], rval: Expression,
                                  ctx: AssignmentStmt) -> None:
        """Propagate module references through assignments.

        If the rvalue is a reference to a module, the assigned names become
        module aliases.  Tuple assignments are handled recursively,
        element by element.
        """
        if (isinstance(rval, (TupleExpr, ListExpr))
                and all(isinstance(v, TupleExpr) for v in lvals)):
            # The rvalue is a sequence and every lvalue is a tuple: mypy cannot
            # understand our all(isinstance(...)), so cast them as TupleExpr
            # so mypy knows it is safe to access their .items attribute.
            seq_lvals = cast(List[TupleExpr], lvals)
            # given an assignment like:
            #     (x, y) = (m, n) = (a, b)
            # we now have:
            #     seq_lvals = [(x, y), (m, n)]
            #     seq_rval = (a, b)
            # We now zip this into:
            #     elementwise_assignments = [(a, x, m), (b, y, n)]
            # where each elementwise assignment includes one element of rval and the
            # corresponding element of each lval. Basically we unpack
            #     (x, y) = (m, n) = (a, b)
            # into elementwise assignments
            #     x = m = a
            #     y = n = b
            # and then we recursively call this method for each of those assignments.
            # If the rval and all lvals are not all of the same length, zip will just ignore
            # extra elements, so no error will be raised here; mypy will later complain
            # about the length mismatch in type-checking.
            elementwise_assignments = zip(rval.items, *[v.items for v in seq_lvals])
            for rv, *lvs in elementwise_assignments:
                self.process_module_assignment(lvs, rv, ctx)
        elif isinstance(rval, RefExpr):
            rnode = self.lookup_type_node(rval)
            if rnode and isinstance(rnode.node, MypyFile):
                for lval in lvals:
                    if not isinstance(lval, RefExpr):
                        continue
                    # respect explicitly annotated type
                    if (isinstance(lval.node, Var) and lval.node.type is not None):
                        continue
                    # We can handle these assignments to locals and to self
                    if isinstance(lval, NameExpr):
                        lnode = self.current_symbol_table().get(lval.name)
                    elif isinstance(lval, MemberExpr) and self.is_self_member_ref(lval):
                        assert self.type is not None
                        lnode = self.type.names.get(lval.name)
                    else:
                        continue
                    if lnode:
                        if isinstance(lnode.node, MypyFile) and lnode.node is not rnode.node:
                            assert isinstance(lval, (NameExpr, MemberExpr))
                            self.fail(
                                'Cannot assign multiple modules to name "{}" '
                                'without explicit "types.ModuleType" annotation'.format(lval.name),
                                ctx)
                        # never create module alias except on initial var definition
                        elif lval.is_inferred_def:
                            assert rnode.node is not None
                            lnode.node = rnode.node
def process__all__(self, s: AssignmentStmt) -> None:
if (len(s.lvalues) == 1 and isinstance(s.lvalues[0], NameExpr) and
s.lvalues[0].name == '__all__' and s.lvalues[0].kind == GDEF and
isinstance(s.rvalue, (ListExpr, TupleExpr))):
self.add_exports(s.rvalue.items)
def process__deletable__(self, s: AssignmentStmt) -> None:
if not self.options.mypyc:
return
if (len(s.lvalues) == 1 and isinstance(s.lvalues[0], NameExpr) and
s.lvalues[0].name == '__deletable__' and s.lvalues[0].kind == MDEF):
rvalue = s.rvalue
if not isinstance(rvalue, (ListExpr, TupleExpr)):
self.fail('"__deletable__" must be initialized with a list or tuple expression', s)
return
items = rvalue.items
attrs = []
for item in items:
if not isinstance(item, StrExpr):
self.fail('Invalid "__deletable__" item; string literal expected', item)
else:
attrs.append(item.value)
assert self.type
self.type.deletable_attributes = attrs
#
# Misc statements
#
def visit_block(self, b: Block) -> None:
if b.is_unreachable:
return
self.block_depth[-1] += 1
for s in b.body:
self.accept(s)
self.block_depth[-1] -= 1
def visit_block_maybe(self, b: Optional[Block]) -> None:
if b:
self.visit_block(b)
def visit_expression_stmt(self, s: ExpressionStmt) -> None:
self.statement = s
s.expr.accept(self)
def visit_return_stmt(self, s: ReturnStmt) -> None:
self.statement = s
if not self.is_func_scope():
self.fail('"return" outside function', s)
if s.expr:
s.expr.accept(self)
def visit_raise_stmt(self, s: RaiseStmt) -> None:
self.statement = s
if s.expr:
s.expr.accept(self)
if s.from_expr:
s.from_expr.accept(self)
def visit_assert_stmt(self, s: AssertStmt) -> None:
self.statement = s
if s.expr:
s.expr.accept(self)
if s.msg:
s.msg.accept(self)
def visit_operator_assignment_stmt(self,
s: OperatorAssignmentStmt) -> None:
self.statement = s
s.lvalue.accept(self)
s.rvalue.accept(self)
if (isinstance(s.lvalue, NameExpr) and s.lvalue.name == '__all__' and
s.lvalue.kind == GDEF and isinstance(s.rvalue, (ListExpr, TupleExpr))):
self.add_exports(s.rvalue.items)
def visit_while_stmt(self, s: WhileStmt) -> None:
self.statement = s
s.expr.accept(self)
self.loop_depth += 1
s.body.accept(self)
self.loop_depth -= 1
self.visit_block_maybe(s.else_body)
def visit_for_stmt(self, s: ForStmt) -> None:
self.statement = s
s.expr.accept(self)
# Bind index variables and check if they define new names.
self.analyze_lvalue(s.index, explicit_type=s.index_type is not None)
if s.index_type:
if self.is_classvar(s.index_type):
self.fail_invalid_classvar(s.index)
allow_tuple_literal = isinstance(s.index, TupleExpr)
analyzed = self.anal_type(s.index_type, allow_tuple_literal=allow_tuple_literal)
if analyzed is not None:
self.store_declared_types(s.index, analyzed)
s.index_type = analyzed
self.loop_depth += 1
self.visit_block(s.body)
self.loop_depth -= 1
self.visit_block_maybe(s.else_body)
def visit_break_stmt(self, s: BreakStmt) -> None:
self.statement = s
if self.loop_depth == 0:
self.fail('"break" outside loop', s, serious=True, blocker=True)
def visit_continue_stmt(self, s: ContinueStmt) -> None:
self.statement = s
if self.loop_depth == 0:
self.fail('"continue" outside loop', s, serious=True, blocker=True)
def visit_if_stmt(self, s: IfStmt) -> None:
self.statement = s
infer_reachability_of_if_statement(s, self.options)
for i in range(len(s.expr)):
s.expr[i].accept(self)
self.visit_block(s.body[i])
self.visit_block_maybe(s.else_body)
def visit_try_stmt(self, s: TryStmt) -> None:
self.statement = s
self.analyze_try_stmt(s, self)
def analyze_try_stmt(self, s: TryStmt, visitor: NodeVisitor[None]) -> None:
s.body.accept(visitor)
for type, var, handler in zip(s.types, s.vars, s.handlers):
if type:
type.accept(visitor)
if var:
self.analyze_lvalue(var)
handler.accept(visitor)
if s.else_body:
s.else_body.accept(visitor)
if s.finally_body:
s.finally_body.accept(visitor)
    def visit_with_stmt(self, s: WithStmt) -> None:
        """Analyze a with statement, including any type comment on it.

        A type comment may give one type (single target) or a tuple of
        types distributed over multiple targets.
        """
        self.statement = s
        types: List[Type] = []
        if s.unanalyzed_type:
            assert isinstance(s.unanalyzed_type, ProperType)
            actual_targets = [t for t in s.target if t is not None]
            if len(actual_targets) == 0:
                # We have a type for no targets
                self.fail('Invalid type comment: "with" statement has no targets', s)
            elif len(actual_targets) == 1:
                # We have one target and one type
                types = [s.unanalyzed_type]
            elif isinstance(s.unanalyzed_type, TupleType):
                # We have multiple targets and multiple types
                if len(actual_targets) == len(s.unanalyzed_type.items):
                    types = s.unanalyzed_type.items.copy()
                else:
                    # But it's the wrong number of items
                    self.fail('Incompatible number of types for "with" targets', s)
            else:
                # Multiple targets but a single, non-tuple type.
                self.fail('Multiple types expected for multiple "with" targets', s)
        new_types: List[Type] = []
        for e, n in zip(s.expr, s.target):
            e.accept(self)
            if n:
                self.analyze_lvalue(n, explicit_type=s.unanalyzed_type is not None)
                # Consume the next declared type, if any, for this target.
                if types:
                    t = types.pop(0)
                    if self.is_classvar(t):
                        self.fail_invalid_classvar(n)
                    allow_tuple_literal = isinstance(n, TupleExpr)
                    analyzed = self.anal_type(t, allow_tuple_literal=allow_tuple_literal)
                    if analyzed is not None:
                        new_types.append(analyzed)
                        self.store_declared_types(n, analyzed)
        # Store the successfully analyzed target types on the statement.
        s.analyzed_types = new_types
        self.visit_block(s.body)
def visit_del_stmt(self, s: DelStmt) -> None:
self.statement = s
s.expr.accept(self)
if not self.is_valid_del_target(s.expr):
self.fail('Invalid delete target', s)
def is_valid_del_target(self, s: Expression) -> bool:
if isinstance(s, (IndexExpr, NameExpr, MemberExpr)):
return True
elif isinstance(s, (TupleExpr, ListExpr)):
return all(self.is_valid_del_target(item) for item in s.items)
else:
return False
def visit_global_decl(self, g: GlobalDecl) -> None:
self.statement = g
for name in g.names:
if name in self.nonlocal_decls[-1]:
self.fail('Name "{}" is nonlocal and global'.format(name), g)
self.global_decls[-1].add(name)
    def visit_nonlocal_decl(self, d: NonlocalDecl) -> None:
        """Process a 'nonlocal' declaration, validating each listed name.

        Each name must be bound in an enclosing function scope, must not be
        defined in the current local scope before the declaration, and must
        not also be declared 'global'.
        """
        self.statement = d
        if not self.is_func_scope():
            self.fail("nonlocal declaration not allowed at module level", d)
        else:
            for name in d.names:
                # Look for a binding in an enclosing function scope
                # (innermost first, excluding the current scope).
                for table in reversed(self.locals[:-1]):
                    if table is not None and name in table:
                        break
                else:
                    # for-else: no enclosing local scope binds this name.
                    self.fail('No binding for nonlocal "{}" found'.format(name), d)
                if self.locals[-1] is not None and name in self.locals[-1]:
                    self.fail('Name "{}" is already defined in local '
                              'scope before nonlocal declaration'.format(name), d)
                if name in self.global_decls[-1]:
                    self.fail('Name "{}" is nonlocal and global'.format(name), d)
                self.nonlocal_decls[-1].add(name)
def visit_print_stmt(self, s: PrintStmt) -> None:
self.statement = s
for arg in s.args:
arg.accept(self)
if s.target:
s.target.accept(self)
def visit_exec_stmt(self, s: ExecStmt) -> None:
self.statement = s
s.expr.accept(self)
if s.globals:
s.globals.accept(self)
if s.locals:
s.locals.accept(self)
def visit_name_expr(self, expr: NameExpr) -> None:
n = self.lookup(expr.name, expr)
if n:
self.bind_name_expr(expr, n)
def bind_name_expr(self, expr: NameExpr, sym: SymbolTableNode) -> None:
if isinstance(sym.node, TypeVarExpr) and self.tvar_scope.get_binding(sym):
self.fail('"{}" is a type variable and only valid in type '
'context'.format(expr.name), expr)
elif isinstance(sym.node, PlaceholderNode):
self.process_placeholder(expr.name, 'name', expr)
else:
expr.kind = sym.kind
expr.node = sym.node
expr.fullname = sym.fullname
def visit_super_expr(self, expr: SuperExpr) -> None:
if not self.type and not expr.call.args:
self.fail('"super" used outside class', expr)
return
expr.info = self.type
for arg in expr.call.args:
arg.accept(self)
def visit_tuple_expr(self, expr: TupleExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_list_expr(self, expr: ListExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_set_expr(self, expr: SetExpr) -> None:
for item in expr.items:
if isinstance(item, StarExpr):
item.valid = True
item.accept(self)
def visit_dict_expr(self, expr: DictExpr) -> None:
for key, value in expr.items:
if key is not None:
key.accept(self)
value.accept(self)
def visit_star_expr(self, expr: StarExpr) -> None:
if not expr.valid:
self.fail('Can use starred expression only as assignment target', expr)
else:
expr.expr.accept(self)
def visit_yield_from_expr(self, e: YieldFromExpr) -> None:
if not self.is_func_scope():
self.fail('"yield from" outside function', e, serious=True, blocker=True)
else:
if self.function_stack[-1].is_coroutine:
self.fail('"yield from" in async function', e, serious=True, blocker=True)
else:
self.function_stack[-1].is_generator = True
if e.expr:
e.expr.accept(self)
    def visit_call_expr(self, expr: CallExpr) -> None:
        """Analyze a call expression.

        Several calls are recognized as special forms (cast(...),
        reveal_type(...), reveal_locals(), _promote(...), dict(...),
        divmod(...)); these are translated into a dedicated node stored
        in expr.analyzed.  Everything else is analyzed as a normal call.
        """
        expr.callee.accept(self)
        if refers_to_fullname(expr.callee, 'typing.cast'):
            # Special form cast(T, x): translate the first argument to a type.
            if not self.check_fixed_args(expr, 2, 'cast'):
                return
            try:
                target = self.expr_to_unanalyzed_type(expr.args[0])
            except TypeTranslationError:
                self.fail('Cast target is not a type', expr)
                return
            # Attach the analyzed special form to the call expression.
            expr.analyzed = CastExpr(expr.args[1], target)
            expr.analyzed.line = expr.line
            expr.analyzed.column = expr.column
            expr.analyzed.accept(self)
        elif refers_to_fullname(expr.callee, 'builtins.reveal_type'):
            if not self.check_fixed_args(expr, 1, 'reveal_type'):
                return
            expr.analyzed = RevealExpr(kind=REVEAL_TYPE, expr=expr.args[0])
            expr.analyzed.line = expr.line
            expr.analyzed.column = expr.column
            expr.analyzed.accept(self)
        elif refers_to_fullname(expr.callee, 'builtins.reveal_locals'):
            # Collect the Var nodes visible in the current scope so the type
            # checker can report their types later.
            local_nodes: List[Var] = []
            if self.is_module_scope():
                # Each SymbolTableNode has an attribute node that is nodes.Var;
                # look for variable nodes that are marked as is_inferred.
                local_nodes = [n.node
                               for name, n in self.globals.items()
                               if getattr(n.node, 'is_inferred', False)
                               and isinstance(n.node, Var)]
            elif self.is_class_scope():
                # type = None  # type: Optional[TypeInfo]
                if self.type is not None:
                    local_nodes = [st.node
                                   for st in self.type.names.values()
                                   if isinstance(st.node, Var)]
            elif self.is_func_scope():
                # locals = None  # type: List[Optional[SymbolTable]]
                if self.locals is not None:
                    symbol_table = self.locals[-1]
                    if symbol_table is not None:
                        local_nodes = [st.node
                                       for st in symbol_table.values()
                                       if isinstance(st.node, Var)]
            expr.analyzed = RevealExpr(kind=REVEAL_LOCALS, local_nodes=local_nodes)
            expr.analyzed.line = expr.line
            expr.analyzed.column = expr.column
            expr.analyzed.accept(self)
        elif refers_to_fullname(expr.callee, 'typing.Any'):
            # Special form Any(...) no longer supported.
            self.fail('Any(...) is no longer supported. Use cast(Any, ...) instead', expr)
        elif refers_to_fullname(expr.callee, 'typing._promote'):
            # Special form _promote(...).
            if not self.check_fixed_args(expr, 1, '_promote'):
                return
            # Translate first argument to an unanalyzed type.
            try:
                target = self.expr_to_unanalyzed_type(expr.args[0])
            except TypeTranslationError:
                self.fail('Argument 1 to _promote is not a type', expr)
                return
            expr.analyzed = PromoteExpr(target)
            expr.analyzed.line = expr.line
            expr.analyzed.accept(self)
        elif refers_to_fullname(expr.callee, 'builtins.dict'):
            expr.analyzed = self.translate_dict_call(expr)
        elif refers_to_fullname(expr.callee, 'builtins.divmod'):
            if not self.check_fixed_args(expr, 2, 'divmod'):
                return
            expr.analyzed = OpExpr('divmod', expr.args[0], expr.args[1])
            expr.analyzed.line = expr.line
            expr.analyzed.accept(self)
        else:
            # Normal call expression.
            for a in expr.args:
                a.accept(self)
            if (isinstance(expr.callee, MemberExpr) and
                    isinstance(expr.callee.expr, NameExpr) and
                    expr.callee.expr.name == '__all__' and
                    expr.callee.expr.kind == GDEF and
                    expr.callee.name in ('append', 'extend')):
                # Treat __all__.append(...) / __all__.extend([...]) as exports.
                if expr.callee.name == 'append' and expr.args:
                    self.add_exports(expr.args[0])
                elif (expr.callee.name == 'extend' and expr.args and
                        isinstance(expr.args[0], (ListExpr, TupleExpr))):
                    self.add_exports(expr.args[0].items)
def translate_dict_call(self, call: CallExpr) -> Optional[DictExpr]:
if not all(kind == ARG_NAMED for kind in call.arg_kinds):
# Must still accept those args.
for a in call.args:
a.accept(self)
return None
expr = DictExpr([(StrExpr(cast(str, key)), value) # since they are all ARG_NAMED
for key, value in zip(call.arg_names, call.args)])
expr.set_line(call)
expr.accept(self)
return expr
def check_fixed_args(self, expr: CallExpr, numargs: int,
name: str) -> bool:
s = 's'
if numargs == 1:
s = ''
if len(expr.args) != numargs:
self.fail('"%s" expects %d argument%s' % (name, numargs, s),
expr)
return False
if expr.arg_kinds != [ARG_POS] * numargs:
self.fail('"%s" must be called with %s positional argument%s' %
(name, numargs, s), expr)
return False
return True
    def visit_member_expr(self, expr: MemberExpr) -> None:
        """Analyze an attribute access expression (base.name).

        Binds the member to a symbol when the base is a module reference
        or a class (directly, through self/cls, or through a no-args alias).
        """
        base = expr.expr
        base.accept(self)
        if isinstance(base, RefExpr) and isinstance(base.node, MypyFile):
            # Handle module attribute.
            sym = self.get_module_symbol(base.node, expr.name)
            if sym:
                if isinstance(sym.node, PlaceholderNode):
                    self.process_placeholder(expr.name, 'attribute', expr)
                    return
                expr.kind = sym.kind
                expr.fullname = sym.fullname
                expr.node = sym.node
        elif isinstance(base, RefExpr):
            # This branch handles the case C.bar (or cls.bar or self.bar inside
            # a classmethod/method), where C is a class and bar is a type
            # definition or a module resulting from `import bar` (or a module
            # assignment) inside class C. We look up bar in the class' TypeInfo
            type_info = None
            if isinstance(base.node, TypeInfo):
                # C.bar where C is a class.
                type_info = base.node
            elif isinstance(base.node, Var) and self.type and self.function_stack:
                # Check for self.bar or cls.bar in a method/classmethod: only the
                # first formal argument of a non-static method binds the class.
                func_def = self.function_stack[-1]
                if not func_def.is_static and isinstance(func_def.type, CallableType):
                    formal_arg = func_def.type.argument_by_name(base.node.name)
                    if formal_arg and formal_arg.pos == 0:
                        type_info = self.type
            elif isinstance(base.node, TypeAlias) and base.node.no_args:
                # A no-args alias of a class behaves like the class itself.
                assert isinstance(base.node.target, ProperType)
                if isinstance(base.node.target, Instance):
                    type_info = base.node.target.type
            if type_info:
                n = type_info.names.get(expr.name)
                if n is not None and isinstance(n.node, (MypyFile, TypeInfo, TypeAlias)):
                    # NOTE(review): 'n' is already known non-None here, so this
                    # guard looks redundant -- verify before removing.
                    if not n:
                        return
                    expr.kind = n.kind
                    expr.fullname = n.fullname
                    expr.node = n.node
    def visit_op_expr(self, expr: OpExpr) -> None:
        """Analyze a binary operation.

        For 'and'/'or', when the truth value of the left operand can be
        statically inferred, mark the right operand as unreachable or as
        always evaluated, mirroring short-circuit semantics.
        """
        expr.left.accept(self)
        if expr.op in ('and', 'or'):
            inferred = infer_condition_value(expr.left, self.options)
            if ((inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'and') or
                    (inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'or')):
                # Short circuit: the right operand can never be evaluated,
                # so it is not analyzed at all.
                expr.right_unreachable = True
                return
            elif ((inferred in (ALWAYS_TRUE, MYPY_TRUE) and expr.op == 'and') or
                    (inferred in (ALWAYS_FALSE, MYPY_FALSE) and expr.op == 'or')):
                expr.right_always = True
        expr.right.accept(self)
def visit_comparison_expr(self, expr: ComparisonExpr) -> None:
for operand in expr.operands:
operand.accept(self)
def visit_unary_expr(self, expr: UnaryExpr) -> None:
expr.expr.accept(self)
def visit_index_expr(self, expr: IndexExpr) -> None:
base = expr.base
base.accept(self)
if (isinstance(base, RefExpr)
and isinstance(base.node, TypeInfo)
and not base.node.is_generic()):
expr.index.accept(self)
elif ((isinstance(base, RefExpr) and isinstance(base.node, TypeAlias))
or refers_to_class_or_function(base)):
self.analyze_type_application(expr)
else:
expr.index.accept(self)
    def analyze_type_application(self, expr: IndexExpr) -> None:
        """Analyze special form -- type application (e.g. List[int])."""
        types = self.analyze_type_application_args(expr)
        if types is None:
            return
        base = expr.base
        expr.analyzed = TypeApplication(base, types)
        expr.analyzed.line = expr.line
        expr.analyzed.column = expr.column
        # Builtin containers such as list/dict/set are not subscriptable on
        # older Python versions; reject subscripting them via a type alias...
        if isinstance(base, RefExpr) and isinstance(base.node, TypeAlias):
            alias = base.node
            target = get_proper_type(alias.target)
            if isinstance(target, Instance):
                name = target.type.fullname
                # alias.normalized aliases are skipped to avoid duplicate errors.
                if (alias.no_args and
                        name in get_nongen_builtins(self.options.python_version) and
                        not alias.normalized):
                    self.fail(no_subscript_builtin_alias(name, propose_alt=False), expr)
        # ...or directly (outside stub files).
        else:
            n = self.lookup_type_node(base)
            if (n and n.fullname in get_nongen_builtins(self.options.python_version) and
                    not self.is_stub_file):
                self.fail(no_subscript_builtin_alias(n.fullname, propose_alt=False), expr)
    def analyze_type_application_args(self, expr: IndexExpr) -> Optional[List[Type]]:
        """Analyze the type arguments (the index) of a type application.

        Return None if anything was incomplete or invalid (an error is
        reported for invalid arguments).
        """
        index = expr.index
        tag = self.track_incomplete_refs()
        self.analyze_type_expr(index)
        if self.found_incomplete_ref(tag):
            return None
        types: List[Type] = []
        if isinstance(index, TupleExpr):
            items = index.items
            is_tuple = isinstance(expr.base, RefExpr) and expr.base.fullname == 'builtins.tuple'
            if is_tuple and len(items) == 2 and isinstance(items[-1], EllipsisExpr):
                # Special case Tuple[t, ...]: drop the trailing ellipsis.
                items = items[:-1]
        else:
            items = [index]
        for item in items:
            try:
                typearg = self.expr_to_unanalyzed_type(item)
            except TypeTranslationError:
                self.fail('Type expected within [...]', expr)
                return None
            # Unbound type variables are allowed here since this may be the
            # rvalue of a type alias definition; invalid uses are presumably
            # reported elsewhere.
            analyzed = self.anal_type(typearg, allow_unbound_tvars=True,
                                      allow_placeholder=True)
            if analyzed is None:
                return None
            types.append(analyzed)
        return types
def visit_slice_expr(self, expr: SliceExpr) -> None:
if expr.begin_index:
expr.begin_index.accept(self)
if expr.end_index:
expr.end_index.accept(self)
if expr.stride:
expr.stride.accept(self)
def visit_cast_expr(self, expr: CastExpr) -> None:
expr.expr.accept(self)
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_reveal_expr(self, expr: RevealExpr) -> None:
if expr.kind == REVEAL_TYPE:
if expr.expr is not None:
expr.expr.accept(self)
else:
pass
def visit_type_application(self, expr: TypeApplication) -> None:
expr.expr.accept(self)
for i in range(len(expr.types)):
analyzed = self.anal_type(expr.types[i])
if analyzed is not None:
expr.types[i] = analyzed
def visit_list_comprehension(self, expr: ListComprehension) -> None:
expr.generator.accept(self)
def visit_set_comprehension(self, expr: SetComprehension) -> None:
expr.generator.accept(self)
def visit_dictionary_comprehension(self, expr: DictionaryComprehension) -> None:
self.enter(expr)
self.analyze_comp_for(expr)
expr.key.accept(self)
expr.value.accept(self)
self.leave()
self.analyze_comp_for_2(expr)
def visit_generator_expr(self, expr: GeneratorExpr) -> None:
self.enter(expr)
self.analyze_comp_for(expr)
expr.left_expr.accept(self)
self.leave()
self.analyze_comp_for_2(expr)
def analyze_comp_for(self, expr: Union[GeneratorExpr,
DictionaryComprehension]) -> None:
for i, (index, sequence, conditions) in enumerate(zip(expr.indices,
expr.sequences,
expr.condlists)):
if i > 0:
sequence.accept(self)
self.analyze_lvalue(index)
for cond in conditions:
cond.accept(self)
def analyze_comp_for_2(self, expr: Union[GeneratorExpr,
DictionaryComprehension]) -> None:
expr.sequences[0].accept(self)
def visit_lambda_expr(self, expr: LambdaExpr) -> None:
self.analyze_arg_initializers(expr)
self.analyze_function_body(expr)
def visit_conditional_expr(self, expr: ConditionalExpr) -> None:
expr.if_expr.accept(self)
expr.cond.accept(self)
expr.else_expr.accept(self)
def visit_backquote_expr(self, expr: BackquoteExpr) -> None:
expr.expr.accept(self)
def visit__promote_expr(self, expr: PromoteExpr) -> None:
analyzed = self.anal_type(expr.type)
if analyzed is not None:
expr.type = analyzed
def visit_yield_expr(self, expr: YieldExpr) -> None:
if not self.is_func_scope():
self.fail('"yield" outside function', expr, serious=True, blocker=True)
else:
if self.function_stack[-1].is_coroutine:
if self.options.python_version < (3, 6):
self.fail('"yield" in async function', expr, serious=True, blocker=True)
else:
self.function_stack[-1].is_generator = True
self.function_stack[-1].is_async_generator = True
else:
self.function_stack[-1].is_generator = True
if expr.expr:
expr.expr.accept(self)
def visit_await_expr(self, expr: AwaitExpr) -> None:
if not self.is_func_scope():
self.fail('"await" outside function', expr)
elif not self.function_stack[-1].is_coroutine:
self.fail('"await" outside coroutine ("async def")', expr)
expr.expr.accept(self)
def lookup(self, name: str, ctx: Context,
suppress_errors: bool = False) -> Optional[SymbolTableNode]:
implicit_name = False
if name in self.global_decls[-1]:
if name in self.globals:
return self.globals[name]
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
if name in self.nonlocal_decls[-1]:
for table in reversed(self.locals[:-1]):
if table is not None and name in table:
return table[name]
else:
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
if self.type and not self.is_func_scope() and name in self.type.names:
node = self.type.names[name]
if not node.implicit:
if self.is_active_symbol_in_class_body(node.node):
return node
else:
implicit_name = True
implicit_node = node
for table in reversed(self.locals):
if table is not None and name in table:
return table[name]
if name in self.globals:
return self.globals[name]
b = self.globals.get('__builtins__', None)
if b:
assert isinstance(b.node, MypyFile)
table = b.node.names
if name in table:
if name[0] == "_" and name[1] != "_":
if not suppress_errors:
self.name_not_defined(name, ctx)
return None
node = table[name]
return node
if not implicit_name and not suppress_errors:
self.name_not_defined(name, ctx)
else:
if implicit_name:
return implicit_node
return None
def is_active_symbol_in_class_body(self, node: Optional[SymbolNode]) -> bool:
assert self.statement
return (node is None
or self.is_textually_before_statement(node)
or not self.is_defined_in_current_module(node.fullname)
or isinstance(node, TypeInfo)
or (isinstance(node, PlaceholderNode) and node.becomes_typeinfo))
def is_textually_before_statement(self, node: SymbolNode) -> bool:
assert self.statement
line_diff = self.statement.line - node.line
if self.is_overloaded_item(node, self.statement):
return False
elif isinstance(node, Decorator) and not node.is_overload:
return line_diff > len(node.original_decorators)
else:
return line_diff > 0
def is_overloaded_item(self, node: SymbolNode, statement: Statement) -> bool:
if isinstance(node, OverloadedFuncDef) and isinstance(statement, FuncDef):
in_items = statement in {item.func if isinstance(item, Decorator)
else item for item in node.items}
in_impl = (node.impl is not None and
((isinstance(node.impl, Decorator) and statement is node.impl.func)
or statement is node.impl))
return in_items or in_impl
return False
def is_defined_in_current_module(self, fullname: Optional[str]) -> bool:
if fullname is None:
return False
return module_prefix(self.modules, fullname) == self.cur_mod_id
    def lookup_qualified(self, name: str, ctx: Context,
                         suppress_errors: bool = False) -> Optional[SymbolTableNode]:
        """Look up a (possibly) qualified name.

        Resolves each dotted component through classes, modules, no-args
        type aliases, or -- for Any-typed variables -- an implicit symbol.
        The result may be a PlaceholderNode if a component is not fully
        processed yet.
        """
        if '.' not in name:
            # Simple case: delegate to lookup().
            return self.lookup(name, ctx, suppress_errors=suppress_errors)
        parts = name.split('.')
        namespace = self.cur_mod_id
        sym = self.lookup(parts[0], ctx, suppress_errors=suppress_errors)
        if sym:
            for i in range(1, len(parts)):
                node = sym.node
                part = parts[i]
                if isinstance(node, TypeInfo):
                    nextsym = node.get(part)
                elif isinstance(node, MypyFile):
                    nextsym = self.get_module_symbol(node, part)
                    namespace = node.fullname
                elif isinstance(node, PlaceholderNode):
                    return sym
                elif isinstance(node, TypeAlias) and node.no_args:
                    assert isinstance(node.target, ProperType)
                    # NOTE(review): if the alias target is not an Instance,
                    # 'nextsym' may be left unset here on the first
                    # iteration -- verify.
                    if isinstance(node.target, Instance):
                        nextsym = node.target.type.get(part)
                else:
                    if isinstance(node, Var):
                        typ = get_proper_type(node.type)
                        if isinstance(typ, AnyType):
                            # The remaining components live behind a value of
                            # type Any; synthesize an implicit Any symbol.
                            return self.implicit_symbol(sym, name, parts[i:], typ)
                    nextsym = None
                if not nextsym or nextsym.module_hidden:
                    if not suppress_errors:
                        self.name_not_defined(name, ctx, namespace=namespace)
                    return None
                sym = nextsym
        return sym
def lookup_type_node(self, expr: Expression) -> Optional[SymbolTableNode]:
try:
t = self.expr_to_unanalyzed_type(expr)
except TypeTranslationError:
return None
if isinstance(t, UnboundType):
n = self.lookup_qualified(t.name, expr, suppress_errors=True)
return n
return None
    def get_module_symbol(self, node: MypyFile, name: str) -> Optional[SymbolTableNode]:
        """Look up a symbol from a module.

        Return None if no matching symbol could be bound.  Handles
        submodules, incomplete namespaces, module-level __getattr__, and
        attributes of missing modules (which become Any-typed variables).
        """
        module = node.fullname
        names = node.names
        sym = names.get(name)
        if not sym:
            fullname = module + '.' + name
            if fullname in self.modules:
                # The name refers to a submodule rather than a module attribute.
                sym = SymbolTableNode(GDEF, self.modules[fullname])
            elif self.is_incomplete_namespace(module):
                # The module is not fully processed yet; come back later.
                self.record_incomplete_ref()
            elif ('__getattr__' in names
                    and (node.is_stub
                         or self.options.python_version >= (3, 7))):
                # Fall back to module-level __getattr__ (stubs always;
                # runtime modules only on Python 3.7+).
                gvar = self.create_getattr_var(names['__getattr__'], name, fullname)
                if gvar:
                    sym = SymbolTableNode(GDEF, gvar)
            elif self.is_missing_module(fullname):
                # Attribute of a missing (sub)module -- give it type Any.
                var_type = AnyType(TypeOfAny.from_unimported_type)
                v = Var(name, type=var_type)
                v._fullname = fullname
                sym = SymbolTableNode(GDEF, v)
        elif sym.module_hidden:
            # The symbol exists but is hidden from module users
            # (e.g. not re-exported).
            sym = None
        return sym
def is_missing_module(self, module: str) -> bool:
return module in self.missing_modules
    def implicit_symbol(self, sym: SymbolTableNode, name: str, parts: List[str],
                        source_type: AnyType) -> SymbolTableNode:
        """Create a symbol for an implicit (Any-typed) attribute reached through *sym*.

        The variable's type is an Any derived from *source_type*, and its
        full name is built from the base symbol plus the remaining *parts*.
        """
        if sym.node is None:
            basename = None
        else:
            basename = sym.node.fullname
        if basename is None:
            fullname = name
        else:
            fullname = basename + '.' + '.'.join(parts)
        var_type = AnyType(TypeOfAny.from_another_any, source_type)
        var = Var(parts[-1], var_type)
        var._fullname = fullname
        return SymbolTableNode(GDEF, var)
    def create_getattr_var(self, getattr_defn: SymbolTableNode,
                           name: str, fullname: str) -> Optional[Var]:
        """Create a variable using the return type of a module-level ``__getattr__``.

        Return None if the ``__getattr__`` definition is not a function or
        variable, in which case the caller falls back to other strategies.
        """
        if isinstance(getattr_defn.node, (FuncDef, Var)):
            node_type = get_proper_type(getattr_defn.node.type)
            if isinstance(node_type, CallableType):
                typ = node_type.ret_type
            else:
                # Untyped __getattr__ (or a non-callable type): fall back to Any.
                typ = AnyType(TypeOfAny.from_error)
            v = Var(name, type=typ)
            v._fullname = fullname
            # Mark the variable as synthesized from a module __getattr__
            # (used when comparing symbols for redefinition purposes).
            v.from_module_getattr = True
            return v
        return None
    def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
        """Look up a fully qualified name that is expected to resolve.

        All intermediate components must be modules; a KeyError (or failed
        assertion) is raised if the name cannot be found.
        """
        parts = name.split('.')
        n = self.modules[parts[0]]
        # Walk through the intermediate components, which must all be modules.
        for i in range(1, len(parts) - 1):
            next_sym = n.names[parts[i]]
            assert isinstance(next_sym.node, MypyFile)
            n = next_sym.node
        return n.names[parts[-1]]
    def lookup_fully_qualified_or_none(self, fullname: str) -> Optional[SymbolTableNode]:
        """Look up the final component of *fullname* in its containing module.

        Return None if the module is unknown or the name is missing.  If
        the module namespace is still incomplete, also defer the current
        target so the lookup can be retried on a later iteration.
        """
        assert '.' in fullname
        module, name = fullname.rsplit('.', maxsplit=1)
        if module not in self.modules:
            return None
        filenode = self.modules[module]
        result = filenode.names.get(name)
        if result is None and self.is_incomplete_namespace(module):
            # The name may become defined in a later iteration.
            self.record_incomplete_ref()
        return result
    def builtin_type(self, fully_qualified_name: str) -> Instance:
        """Return an Instance for a builtin type, filling generics with Any."""
        sym = self.lookup_fully_qualified(fully_qualified_name)
        node = sym.node
        assert isinstance(node, TypeInfo)
        return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
def object_type(self) -> Instance:
return self.named_type('__builtins__.object')
def str_type(self) -> Instance:
return self.named_type('__builtins__.str')
    def named_type(self, qualified_name: str, args: Optional[List[Type]] = None) -> Instance:
        """Construct an Instance of a type that must already be in scope.

        If *args* is omitted, a generic target is filled with Any type
        arguments.
        """
        sym = self.lookup_qualified(qualified_name, Context())
        assert sym, "Internal error: attempted to construct unknown type"
        node = sym.node
        assert isinstance(node, TypeInfo)
        if args:
            return Instance(node, args)
        return Instance(node, [AnyType(TypeOfAny.special_form)] * len(node.defn.type_vars))
    def named_type_or_none(self, qualified_name: str,
                           args: Optional[List[Type]] = None) -> Optional[Instance]:
        """Like named_type(), but return None if the type is not yet defined.

        Also follows a simple type alias (``X = SomeClass``) to its target
        class.
        """
        sym = self.lookup_fully_qualified_or_none(qualified_name)
        if not sym or isinstance(sym.node, PlaceholderNode):
            return None
        node = sym.node
        if isinstance(node, TypeAlias):
            # Follow the alias to the underlying class.
            assert isinstance(node.target, Instance)
            node = node.target.type
        assert isinstance(node, TypeInfo), node
        if args is not None:
            return Instance(node, args)
        return Instance(node, [AnyType(TypeOfAny.unannotated)] * len(node.defn.type_vars))
def lookup_current_scope(self, name: str) -> Optional[SymbolTableNode]:
if self.locals[-1] is not None:
return self.locals[-1].get(name)
elif self.type is not None:
return self.type.names.get(name)
else:
return self.globals.get(name)
    def add_symbol(self,
                   name: str,
                   node: SymbolNode,
                   context: Context,
                   module_public: bool = True,
                   module_hidden: bool = False,
                   can_defer: bool = True,
                   escape_comprehensions: bool = False) -> bool:
        """Add a symbol to the currently active symbol table.

        The symbol kind (LDEF/MDEF/GDEF) is derived from the current
        scope.  Return True if the symbol was actually added (it may be
        rejected, e.g. on an invalid redefinition).
        """
        if self.is_func_scope():
            kind = LDEF
        elif self.type is not None:
            kind = MDEF
        else:
            kind = GDEF
        symbol = SymbolTableNode(kind,
                                 node,
                                 module_public=module_public,
                                 module_hidden=module_hidden)
        return self.add_symbol_table_node(name, symbol, context, can_defer, escape_comprehensions)
    def add_symbol_skip_local(self, name: str, node: SymbolNode) -> None:
        """Add a symbol while skipping the innermost function scope.

        The symbol goes into the enclosing class namespace if there is one,
        otherwise into the module globals.
        """
        if self.type is not None:
            names = self.type.names
            kind = MDEF
        else:
            names = self.globals
            kind = GDEF
        symbol = SymbolTableNode(kind, node)
        names[name] = symbol
    def add_symbol_table_node(self,
                              name: str,
                              symbol: SymbolTableNode,
                              context: Optional[Context] = None,
                              can_defer: bool = True,
                              escape_comprehensions: bool = False) -> bool:
        """Add a symbol table node to the currently active symbol table.

        Return True if the node was actually added.  Redefinitions are
        reported as errors, merged (conditional function definitions), or
        stored under a mangled name, depending on what is being redefined.
        """
        names = self.current_symbol_table(escape_comprehensions=escape_comprehensions)
        existing = names.get(name)
        if isinstance(symbol.node, PlaceholderNode) and can_defer:
            # The target is not ready yet -- analyze it again later.
            self.defer(context)
        if (existing is not None
                and context is not None
                and not is_valid_replacement(existing, symbol)):
            # An existing definition that we may not silently replace:
            # decide how to report or record the redefinition.
            old = existing.node
            new = symbol.node
            if isinstance(new, PlaceholderNode):
                # We don't know whether this is okay. Let's wait until the next iteration.
                return False
            if not is_same_symbol(old, new):
                if isinstance(new, (FuncDef, Decorator, OverloadedFuncDef, TypeInfo)):
                    self.add_redefinition(names, name, symbol)
                if not (isinstance(new, (FuncDef, Decorator))
                        and self.set_original_def(old, new)):
                    self.name_already_defined(name, context, existing)
        elif (name not in self.missing_names[-1] and '*' not in self.missing_names[-1]):
            names[name] = symbol
            self.progress = True
            return True
        return False
    def add_redefinition(self,
                         names: SymbolTable,
                         name: str,
                         symbol: SymbolTableNode) -> None:
        """Store a redefined symbol under a mangled name.

        The mangled name has the form ``<name>-redefinition`` (with a
        numeric suffix for the third and later definitions).
        """
        i = 1
        # Don't serialize redefined nodes. They are likely to have
        # references back to the original definition, and they can't be
        # looked up by their mangled name anyway.
        symbol.no_serialize = True
        while True:
            if i == 1:
                new_name = '{}-redefinition'.format(name)
            else:
                new_name = '{}-redefinition{}'.format(name, i)
            existing = names.get(new_name)
            if existing is None:
                names[new_name] = symbol
                return
            elif existing.node is symbol.node:
                # Already there
                return
            i += 1
    def add_local(self, node: Union[Var, FuncDef, OverloadedFuncDef], context: Context) -> None:
        """Add a local (function-scope) definition; must be inside a function."""
        assert self.is_func_scope()
        name = node.name
        # Local names are not qualified: the full name is the bare name.
        node._fullname = name
        self.add_symbol(name, node, context)
    def add_module_symbol(self,
                          id: str,
                          as_id: str,
                          context: Context,
                          module_public: bool,
                          module_hidden: bool) -> None:
        """Add a symbol for module *id* under the local name *as_id*.

        If the module could not be loaded, add an Any-typed placeholder
        variable instead.
        """
        if id in self.modules:
            node = self.modules[id]
            self.add_symbol(as_id, node, context,
                            module_public=module_public,
                            module_hidden=module_hidden)
        else:
            self.add_unknown_imported_symbol(
                as_id, context, target_name=id, module_public=module_public,
                module_hidden=module_hidden
            )
    def add_imported_symbol(self,
                            name: str,
                            node: SymbolTableNode,
                            context: Context,
                            module_public: bool,
                            module_hidden: bool) -> None:
        """Add an alias to an existing symbol through an import statement."""
        assert not module_hidden or not module_public
        # Wrap in a fresh SymbolTableNode so that the visibility flags are
        # local to this module and don't affect the original definition.
        symbol = SymbolTableNode(node.kind, node.node,
                                 module_public=module_public,
                                 module_hidden=module_hidden)
        self.add_symbol_table_node(name, symbol, context)
    def add_unknown_imported_symbol(self,
                                    name: str,
                                    context: Context,
                                    target_name: Optional[str],
                                    module_public: bool,
                                    module_hidden: bool) -> None:
        """Add a symbol for an unresolved import as an Any-typed variable.

        *target_name* is the fully qualified name of the missing import
        target, if known.
        """
        existing = self.current_symbol_table().get(name)
        if existing and isinstance(existing.node, Var) and existing.node.is_suppressed_import:
            # This missing import was already added -- nothing to do here.
            return
        var = Var(name)
        if self.options.logical_deps and target_name is not None:
            # This makes it possible to add logical fine-grained dependencies
            # from a missing module. We can't use this by default, since in a
            # common case the precise target name is not meaningful here.
            var._fullname = target_name
        elif self.type:
            var._fullname = self.type.fullname + "." + name
            var.info = self.type
        else:
            var._fullname = self.qualified_name(name)
        var.is_ready = True
        # Remember the missing import name in the Any type so that better
        # error messages can be produced at use sites.
        any_type = AnyType(TypeOfAny.from_unimported_type, missing_import_name=var._fullname)
        var.type = any_type
        var.is_suppressed_import = True
        self.add_symbol(
            name, var, context, module_public=module_public, module_hidden=module_hidden
        )
@contextmanager
def tvar_scope_frame(self, frame: TypeVarLikeScope) -> Iterator[None]:
old_scope = self.tvar_scope
self.tvar_scope = frame
yield
self.tvar_scope = old_scope
    def defer(self, debug_context: Optional[Context] = None) -> None:
        """Defer the current analysis target to the next iteration.

        *debug_context* is only used to record where the deferral
        happened, for debugging deferral cycles (see report_hang()).
        """
        assert not self.final_iteration, 'Must not defer during final iteration'
        self.deferred = True
        # Line number for the deferral trace.
        line = (debug_context.line if debug_context else
                self.statement.line if self.statement else -1)
        self.deferral_debug_context.append((self.cur_mod_id, line))
    def track_incomplete_refs(self) -> Tag:
        """Return a tag that can later be used to check for new incomplete refs."""
        return self.num_incomplete_refs
    def found_incomplete_ref(self, tag: Tag) -> bool:
        """Have we encountered an incomplete reference since getting *tag*?"""
        return self.num_incomplete_refs != tag
    def record_incomplete_ref(self) -> None:
        """Record that the current target references an incomplete name.

        Defers the target and bumps the counter consulted by
        found_incomplete_ref().
        """
        self.defer()
        self.num_incomplete_refs += 1
    def mark_incomplete(self, name: str, node: Node,
                        becomes_typeinfo: bool = False,
                        module_public: bool = True,
                        module_hidden: bool = False) -> None:
        """Mark a definition as incomplete and defer the current target.

        A PlaceholderNode symbol is added so that other code can find
        *something* under the name.  The special name '*' marks the whole
        namespace as incomplete (used for ``from x import *``).
        """
        self.defer(node)
        if name == '*':
            self.incomplete = True
        elif not self.is_global_or_nonlocal(name):
            fullname = self.qualified_name(name)
            assert self.statement
            placeholder = PlaceholderNode(fullname, node, self.statement.line,
                                          becomes_typeinfo=becomes_typeinfo)
            self.add_symbol(name, placeholder,
                            module_public=module_public, module_hidden=module_hidden,
                            context=dummy_context())
        self.missing_names[-1].add(name)
def is_incomplete_namespace(self, fullname: str) -> bool:
return fullname in self.incomplete_namespaces
    def process_placeholder(self, name: str, kind: str, ctx: Context) -> None:
        """Process a reference targeting a placeholder node.

        On the final iteration report an unresolvable name; otherwise defer
        so the reference can be retried once the target is complete.
        """
        if self.final_iteration:
            self.cannot_resolve_name(name, kind, ctx)
        else:
            self.defer(ctx)
    def cannot_resolve_name(self, name: str, kind: str, ctx: Context) -> None:
        """Report that *name* (of the given kind) cannot be resolved, likely a cycle."""
        self.fail('Cannot resolve {} "{}" (possible cyclic definition)'.format(kind, name), ctx)
def qualified_name(self, name: str) -> str:
if self.type is not None:
return self.type._fullname + '.' + name
elif self.is_func_scope():
return name
else:
return self.cur_mod_id + '.' + name
    def enter(self, function: Union[FuncItem, GeneratorExpr, DictionaryComprehension]) -> None:
        """Enter a function, generator or comprehension scope.

        Pushes fresh per-function state.  The local symbol table is cached
        in ``saved_locals`` so that re-analysis of the same function sees
        the same table.
        """
        names = self.saved_locals.setdefault(function, SymbolTable())
        self.locals.append(names)
        is_comprehension = isinstance(function, (GeneratorExpr, DictionaryComprehension))
        self.is_comprehension_stack.append(is_comprehension)
        self.global_decls.append(set())
        self.nonlocal_decls.append(set())
        # -1 because entering the function body block will increment this to 0.
        self.block_depth.append(-1)
        self.missing_names.append(set())
def leave(self) -> None:
self.locals.pop()
self.is_comprehension_stack.pop()
self.global_decls.pop()
self.nonlocal_decls.pop()
self.block_depth.pop()
self.missing_names.pop()
def is_func_scope(self) -> bool:
return self.locals[-1] is not None
def is_nested_within_func_scope(self) -> bool:
return any(l is not None for l in self.locals)
def is_class_scope(self) -> bool:
return self.type is not None and not self.is_func_scope()
def is_module_scope(self) -> bool:
return not (self.is_class_scope() or self.is_func_scope())
    def current_symbol_kind(self) -> int:
        """Return the symbol kind (MDEF/LDEF/GDEF) matching the current scope."""
        if self.is_class_scope():
            kind = MDEF
        elif self.is_func_scope():
            kind = LDEF
        else:
            kind = GDEF
        return kind
    def current_symbol_table(self, escape_comprehensions: bool = False) -> SymbolTable:
        """Return the symbol table of the innermost active scope.

        With *escape_comprehensions*, skip any comprehension scopes and
        return the table of the closest enclosing non-comprehension scope
        (assignment expressions bind in the containing scope).
        """
        if self.is_func_scope():
            assert self.locals[-1] is not None
            if escape_comprehensions:
                assert len(self.locals) == len(self.is_comprehension_stack)
                # Find the closest enclosing non-comprehension scope.
                for i, is_comprehension in enumerate(reversed(self.is_comprehension_stack)):
                    if not is_comprehension:
                        if i == len(self.locals) - 1:
                            # Outermost scope reached: bind at module level.
                            names = self.globals
                        else:
                            names_candidate = self.locals[-1 - i]
                            assert names_candidate is not None, \
                                "Escaping comprehension from invalid scope"
                            names = names_candidate
                        break
                else:
                    assert False, "Should have at least one non-comprehension scope"
            else:
                names = self.locals[-1]
                assert names is not None
        elif self.type is not None:
            names = self.type.names
        else:
            names = self.globals
        return names
def is_global_or_nonlocal(self, name: str) -> bool:
return (self.is_func_scope()
and (name in self.global_decls[-1]
or name in self.nonlocal_decls[-1]))
    def add_exports(self, exp_or_exps: Union[Iterable[Expression], Expression]) -> None:
        """Record string literal expression(s) as names exported via ``__all__``."""
        exps = [exp_or_exps] if isinstance(exp_or_exps, Expression) else exp_or_exps
        for exp in exps:
            # Only plain string literals are meaningful in __all__.
            if isinstance(exp, StrExpr):
                self.all_exports.append(exp.value)
def check_no_global(self,
name: str,
ctx: Context,
is_overloaded_func: bool = False) -> None:
if name in self.globals:
prev_is_overloaded = isinstance(self.globals[name], OverloadedFuncDef)
if is_overloaded_func and prev_is_overloaded:
self.fail("Nonconsecutive overload {} found".format(name), ctx)
elif prev_is_overloaded:
self.fail("Definition of '{}' missing 'overload'".format(name), ctx)
else:
self.name_already_defined(name, ctx, self.globals[name])
    def name_not_defined(self, name: str, ctx: Context, namespace: Optional[str] = None) -> None:
        """Report a "name not defined" error, or defer if the name may appear later.

        *namespace* is the module being dereferenced, if any; otherwise the
        current module namespace is considered.  Adds helpful notes for
        names that look like unimported typing/builtins names.
        """
        incomplete = self.is_incomplete_namespace(namespace or self.cur_mod_id)
        if (namespace is None
                and self.type
                and not self.is_func_scope()
                and self.incomplete_type_stack[-1]
                and not self.final_iteration):
            # Inside an incomplete class body the name may still be added
            # by a later pass; treat it as incomplete.
            incomplete = True
        if incomplete:
            # It's possible the name will be defined
            # later on. Defer current target.
            self.record_incomplete_ref()
            return
        message = 'Name "{}" is not defined'.format(name)
        self.fail(message, ctx, code=codes.NAME_DEFINED)
        if 'builtins.{}'.format(name) in SUGGESTED_TEST_FIXTURES:
            # The user probably has a missing definition in a test fixture. Let's verify.
            fullname = 'builtins.{}'.format(name)
            if self.lookup_fully_qualified_or_none(fullname) is None:
                self.msg.add_fixture_note(fullname, ctx)
        modules_with_unimported_hints = {
            name.split('.', 1)[0]
            for name in TYPES_FOR_UNIMPORTED_HINTS
        }
        lowercased = {
            name.lower(): name
            for name in TYPES_FOR_UNIMPORTED_HINTS
        }
        for module in modules_with_unimported_hints:
            fullname = '{}.{}'.format(module, name).lower()
            if fullname not in lowercased:
                continue
            # Add a hint like 'Did you forget to import it from "typing"?'
            hint = (
                'Did you forget to import it from "{module}"?'
                ' (Suggestion: "from {module} import {name}")'
            ).format(module=module, name=lowercased[fullname].rsplit('.', 1)[-1])
            self.note(hint, ctx, code=codes.NAME_DEFINED)
    def already_defined(self,
                        name: str,
                        ctx: Context,
                        original_ctx: Optional[Union[SymbolTableNode, SymbolNode]],
                        noun: str) -> None:
        """Report a redefinition error, pointing at the original definition if known.

        *noun* is the kind of thing being redefined (e.g. 'Name' or
        'Attribute').
        """
        if isinstance(original_ctx, SymbolTableNode):
            node: Optional[SymbolNode] = original_ctx.node
        elif isinstance(original_ctx, SymbolNode):
            node = original_ctx
        else:
            node = None
        if isinstance(original_ctx, SymbolTableNode) and isinstance(original_ctx.node, MypyFile):
            # The original definition is an import of a whole module.
            extra_msg = ' (by an import)'
        elif node and node.line != -1 and self.is_local_name(node.fullname):
            # The original definition is local, so we can point at its line.
            extra_msg = ' on line {}'.format(node.line)
        else:
            extra_msg = ' (possibly by an import)'
        self.fail('{} "{}" already defined{}'.format(noun, unmangle(name), extra_msg), ctx,
                  code=codes.NO_REDEF)
    def name_already_defined(self,
                             name: str,
                             ctx: Context,
                             original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
                             ) -> None:
        """Report an error that a name has already been defined."""
        self.already_defined(name, ctx, original_ctx, noun='Name')
    def attribute_already_defined(self,
                                  name: str,
                                  ctx: Context,
                                  original_ctx: Optional[Union[SymbolTableNode, SymbolNode]] = None
                                  ) -> None:
        """Report an error that an attribute has already been defined."""
        self.already_defined(name, ctx, original_ctx, noun='Attribute')
def is_local_name(self, name: str) -> bool:
return self.is_defined_in_current_module(name) or '.' not in name
    def fail(self,
             msg: str,
             ctx: Context,
             serious: bool = False,
             *,
             code: Optional[ErrorCode] = None,
             blocker: bool = False) -> None:
        """Report an error at the location of *ctx*.

        Non-serious errors are suppressed inside dynamically typed
        (untyped) functions unless --check-untyped-defs is enabled.
        """
        if (not serious and
                not self.options.check_untyped_defs and
                self.function_stack and
                self.function_stack[-1].is_dynamic()):
            return
        # Include msg in the assertion so it shows up if ctx is missing.
        assert ctx is not None, msg
        self.errors.report(ctx.get_line(), ctx.get_column(), msg, blocker=blocker, code=code)
    def fail_blocker(self, msg: str, ctx: Context) -> None:
        """Report a blocking error (further processing of the file is stopped)."""
        self.fail(msg, ctx, blocker=True)
    def note(self, msg: str, ctx: Context, code: Optional[ErrorCode] = None) -> None:
        """Report a note at the location of *ctx*.

        Suppressed inside untyped functions, mirroring fail().
        """
        if (not self.options.check_untyped_defs and
                self.function_stack and
                self.function_stack[-1].is_dynamic()):
            return
        self.errors.report(ctx.get_line(), ctx.get_column(), msg, severity='note', code=code)
    def accept(self, node: Node) -> None:
        """Visit *node*, converting unexpected exceptions into internal error reports."""
        try:
            node.accept(self)
        except Exception as err:
            report_internal_error(err, self.errors.file, node.line, self.errors, self.options)
    def expr_to_analyzed_type(self,
                              expr: Expression,
                              report_invalid_types: bool = True,
                              allow_placeholder: bool = False) -> Optional[Type]:
        """Translate an expression to a semantically analyzed type.

        Call expressions are only valid as namedtuple definitions; other
        expressions are converted to an unanalyzed type and then analyzed.
        Return None if the type is not ready yet (target was deferred).
        Raises TypeTranslationError for invalid call expressions.
        """
        if isinstance(expr, CallExpr):
            expr.accept(self)
            internal_name, info = self.named_tuple_analyzer.check_namedtuple(expr, None,
                                                                             self.is_func_scope())
            if internal_name is None:
                # A call expression that isn't a namedtuple is not a valid type.
                raise TypeTranslationError()
            elif not info:
                # The namedtuple is not ready yet; retry later.
                self.defer(expr)
                return None
            assert info.tuple_type, "NamedTuple without tuple type"
            fallback = Instance(info, [])
            return TupleType(info.tuple_type.items, fallback=fallback)
        typ = self.expr_to_unanalyzed_type(expr)
        return self.anal_type(typ, report_invalid_types=report_invalid_types,
                              allow_placeholder=allow_placeholder)
    def analyze_type_expr(self, expr: Expression) -> None:
        """Semantically analyze an expression that appears in a type context."""
        # There are certain expressions that mypy does not need to semantically analyze,
        # since they analyzed solely as type. (For example, indexes in type alias definitions
        # and base classes in class defs). External consumers of the mypy AST may need
        # them semantically analyzed, however, if they need to treat it as an expression
        # and not a type. (Which is to say, mypyc needs to do this.) Do the analysis
        # in a fresh tvar scope in order to suppress any errors about using type variables.
        with self.tvar_scope_frame(TypeVarLikeScope()):
            expr.accept(self)
    def type_analyzer(self, *,
                      tvar_scope: Optional[TypeVarLikeScope] = None,
                      allow_tuple_literal: bool = False,
                      allow_unbound_tvars: bool = False,
                      allow_placeholder: bool = False,
                      report_invalid_types: bool = True) -> TypeAnalyser:
        """Create a TypeAnalyser configured for the current context.

        Defaults to the currently active type variable scope when
        *tvar_scope* is not given.
        """
        if tvar_scope is None:
            tvar_scope = self.tvar_scope
        tpan = TypeAnalyser(self,
                            tvar_scope,
                            self.plugin,
                            self.options,
                            self.is_typeshed_stub_file,
                            allow_unbound_tvars=allow_unbound_tvars,
                            allow_tuple_literal=allow_tuple_literal,
                            report_invalid_types=report_invalid_types,
                            allow_new_syntax=self.is_stub_file,
                            allow_placeholder=allow_placeholder)
        tpan.in_dynamic_func = bool(self.function_stack and self.function_stack[-1].is_dynamic())
        tpan.global_scope = not self.type and not self.function_stack
        return tpan
    def expr_to_unanalyzed_type(self, node: Expression) -> ProperType:
        """Translate an expression to an unanalyzed type (no name resolution)."""
        return expr_to_unanalyzed_type(node, self.options, self.is_stub_file)
    def anal_type(self,
                  typ: Type, *,
                  tvar_scope: Optional[TypeVarLikeScope] = None,
                  allow_tuple_literal: bool = False,
                  allow_unbound_tvars: bool = False,
                  allow_placeholder: bool = False,
                  report_invalid_types: bool = True,
                  third_pass: bool = False) -> Optional[Type]:
        """Semantically analyze a type.

        Return None if some component could not be bound yet (the current
        target is deferred in that case).  Note: ``third_pass`` is
        currently unused in this implementation.
        """
        a = self.type_analyzer(tvar_scope=tvar_scope,
                               allow_unbound_tvars=allow_unbound_tvars,
                               allow_tuple_literal=allow_tuple_literal,
                               allow_placeholder=allow_placeholder,
                               report_invalid_types=report_invalid_types)
        tag = self.track_incomplete_refs()
        typ = typ.accept(a)
        if self.found_incomplete_ref(tag):
            # Something could not be bound yet.
            return None
        # Record type alias dependencies for fine-grained incremental mode.
        self.add_type_alias_deps(a.aliases_used)
        return typ
    def class_type(self, self_type: Type) -> Type:
        """Return ``Type[self_type]`` (normalized)."""
        return TypeType.make_normalized(self_type)
    def schedule_patch(self, priority: int, patch: Callable[[], None]) -> None:
        """Queue a callback to run after semantic analysis, ordered by priority."""
        self.patches.append((priority, patch))
    def report_hang(self) -> None:
        """Report that semantic analysis did not converge (deferral cycle).

        Dumps the deferral trace collected by defer() and reports an
        internal error.
        """
        print('Deferral trace:')
        for mod, line in self.deferral_debug_context:
            print('    {}:{}'.format(mod, line))
        self.errors.report(-1, -1,
                           'INTERNAL ERROR: maximum semantic analysis iteration count reached',
                           blocker=True)
    def add_plugin_dependency(self, trigger: str, target: Optional[str] = None) -> None:
        """Record a plugin-generated fine-grained dependency.

        Defaults to the current analysis target when *target* is omitted.
        """
        if target is None:
            target = self.scope.current_target()
        self.cur_mod_node.plugin_deps.setdefault(trigger, set()).add(target)
    def add_type_alias_deps(self,
                            aliases_used: Iterable[str],
                            target: Optional[str] = None) -> None:
        """Record that *target* depends on the given type aliases."""
        if not aliases_used:
            # A basic optimization to avoid adding targets with no dependencies to
            # the `alias_deps` dict.
            return
        if target is None:
            target = self.scope.current_target()
        self.cur_mod_node.alias_deps[target].update(aliases_used)
    def is_mangled_global(self, name: str) -> bool:
        """Is *name* a global that has at least one mangled (renamed) variant?"""
        # A global is mangled if there exists at least one renamed variant.
        return unmangle(name) + "'" in self.globals
    def is_initial_mangled_global(self, name: str) -> bool:
        """Is *name* the first (single-quote) variant of a mangled global?"""
        return name == unmangle(name) + "'"
def parse_bool(self, expr: Expression) -> Optional[bool]:
if isinstance(expr, NameExpr):
if expr.fullname == 'builtins.True':
return True
if expr.fullname == 'builtins.False':
return False
return None
    def set_future_import_flags(self, module_name: str) -> None:
        """Record a ``from __future__ import <module_name>`` feature flag."""
        if module_name in FUTURE_IMPORTS:
            self.future_import_flags.add(FUTURE_IMPORTS[module_name])
def is_future_flag_set(self, flag: str) -> bool:
return flag in self.future_import_flags
class HasPlaceholders(TypeQuery[bool]):
    """Type query that detects whether a type contains a PlaceholderType."""

    def __init__(self) -> None:
        # `any` combines the per-component results: True if any part matches.
        super().__init__(any)

    def visit_placeholder_type(self, t: PlaceholderType) -> bool:
        return True
def has_placeholder(typ: Type) -> bool:
    """Check if an unprocessed type contains a placeholder anywhere inside it."""
    return typ.accept(HasPlaceholders())
def replace_implicit_first_type(sig: FunctionLike, new: Type) -> FunctionLike:
    """Return a copy of *sig* with the implicit first argument type replaced by *new*.

    Handles both plain callables and overloads (each item is updated).
    """
    if isinstance(sig, CallableType):
        if len(sig.arg_types) == 0:
            # Nothing to replace; signatures with no arguments are returned as-is.
            return sig
        return sig.copy_modified(arg_types=[new] + sig.arg_types[1:])
    elif isinstance(sig, Overloaded):
        return Overloaded([cast(CallableType, replace_implicit_first_type(i, new))
                           for i in sig.items()])
    else:
        assert False
def refers_to_fullname(node: Expression, fullname: str) -> bool:
    """Is *node* a reference expression with the given full name?

    Also matches a type alias whose target is an Instance of the named
    class.
    """
    if not isinstance(node, RefExpr):
        return False
    if node.fullname == fullname:
        return True
    if isinstance(node.node, TypeAlias):
        target = get_proper_type(node.node.target)
        if isinstance(target, Instance) and target.type.fullname == fullname:
            return True
    return False
def refers_to_class_or_function(node: Expression) -> bool:
    """Does semantically analyzed node refer to a class or function definition?"""
    return (isinstance(node, RefExpr) and
            isinstance(node.node, (TypeInfo, FuncDef, OverloadedFuncDef)))
def find_duplicate(items: List[T]) -> Optional[T]:
    """Return the first duplicated item in *items*, or None if all are unique.

    The duplicate is reported at its second occurrence.  The scan is
    quadratic but works for unhashable items, which is fine for the short
    lists this is applied to.

    Note: the parameter was renamed from ``list`` (shadowed the builtin);
    it is only ever passed positionally.
    """
    for i, item in enumerate(items):
        if item in items[:i]:
            return item
    return None
def remove_imported_names_from_symtable(names: SymbolTable,
                                        module: str) -> None:
    """Drop symbols that were imported into *module* from other modules.

    A symbol is kept only when the module part of its full name (the text
    before the last dot) equals *module*.
    """
    imported = []
    for name, node in names.items():
        if node.node is None:
            continue
        fullname = node.node.fullname
        # Module part of the qualified name (everything before the last '.').
        prefix = fullname[:fullname.rfind('.')]
        if prefix != module:
            imported.append(name)
    for name in imported:
        del names[name]
def make_any_non_explicit(t: Type) -> Type:
    """Replace all explicit Anys (from ``Any`` annotations) with special-form Anys."""
    return t.accept(MakeAnyNonExplicit())
class MakeAnyNonExplicit(TypeTranslator):
    """Type translator that downgrades explicit Any to a special-form Any."""

    def visit_any(self, t: AnyType) -> Type:
        if t.type_of_any == TypeOfAny.explicit:
            return t.copy_modified(TypeOfAny.special_form)
        return t

    def visit_type_alias_type(self, t: TypeAliasType) -> Type:
        # Recurse into alias arguments; the alias target is shared.
        return t.copy_modified(args=[a.accept(self) for a in t.args])
def apply_semantic_analyzer_patches(patches: List[Tuple[int, Callable[[], None]]]) -> None:
    """Call patch callbacks in priority order (lowest priority first).

    The sort is stable, so patches with equal priority run in the order
    they were scheduled.
    """
    for _priority, patch_func in sorted(patches, key=lambda pair: pair[0]):
        patch_func()
def names_modified_by_assignment(s: AssignmentStmt) -> List[NameExpr]:
    """Return all unqualified name targets assigned to in the statement."""
    result: List[NameExpr] = []
    for lvalue in s.lvalues:
        result += names_modified_in_lvalue(lvalue)
    return result
def names_modified_in_lvalue(lvalue: Lvalue) -> List[NameExpr]:
    """Return all NameExpr targets inside a (possibly nested) lvalue."""
    if isinstance(lvalue, NameExpr):
        return [lvalue]
    elif isinstance(lvalue, StarExpr):
        return names_modified_in_lvalue(lvalue.expr)
    elif isinstance(lvalue, (ListExpr, TupleExpr)):
        result: List[NameExpr] = []
        for item in lvalue.items:
            result += names_modified_in_lvalue(item)
        return result
    # Member/index lvalues don't introduce plain names.
    return []
def is_same_var_from_getattr(n1: Optional[SymbolNode], n2: Optional[SymbolNode]) -> bool:
    """Do *n1* and *n2* refer to the same Var synthesized from module __getattr__?"""
    return (isinstance(n1, Var)
            and n1.from_module_getattr
            and isinstance(n2, Var)
            and n2.from_module_getattr
            and n1.fullname == n2.fullname)
def dummy_context() -> Context:
    """Return a throwaway context node for reporting without a real location."""
    return TempNode(AnyType(TypeOfAny.special_form))
def is_valid_replacement(old: SymbolTableNode, new: SymbolTableNode) -> bool:
    """Can symbol table node *new* legitimately replace *old*?

    Only placeholders may be replaced: either by a non-placeholder, or by
    a "stronger" placeholder (one that becomes a TypeInfo).
    """
    if isinstance(old.node, PlaceholderNode):
        if isinstance(new.node, PlaceholderNode):
            return not old.node.becomes_typeinfo and new.node.becomes_typeinfo
        else:
            return True
    return False
def is_same_symbol(a: Optional[SymbolNode], b: Optional[SymbolNode]) -> bool:
    """Are two symbol nodes equivalent for redefinition purposes?"""
    return (a == b
            # Placeholders are interchangeable until resolved.
            or (isinstance(a, PlaceholderNode)
                and isinstance(b, PlaceholderNode))
            or is_same_var_from_getattr(a, b))
| true | true |
1c39ac4dc7d7da37e6d7c8e1160c9f4d77f1d1c3 | 838 | py | Python | nova/scheduler/weights/software_mgmt.py | teresa-ho/stx-nova | 1f82323439da2449edbbaed2fe1c8414a550c86f | [
"Apache-2.0"
] | null | null | null | nova/scheduler/weights/software_mgmt.py | teresa-ho/stx-nova | 1f82323439da2449edbbaed2fe1c8414a550c86f | [
"Apache-2.0"
] | null | null | null | nova/scheduler/weights/software_mgmt.py | teresa-ho/stx-nova | 1f82323439da2449edbbaed2fe1c8414a550c86f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
"""
SoftwareMgmtWeigher:
- Prefer hosts that are patch current while there remains other hosts that are
not current.
- Prefer hosts that are upgrades current while upgrades are in progress.
"""
from oslo_config import cfg
from nova.scheduler import weights
CONF = cfg.CONF
class SoftwareMgmtWeigher(weights.BaseHostWeigher):
    """Weigher preferring hosts that are patch/upgrade current."""

    # Lower bound for this weigher's contribution.
    minval = 0

    def _weigh_object(self, host_state, weight_properties):
        """Higher weights win. We want to choose the preferred hosts."""
        weight = 0.0
        if host_state.patch_prefer:
            # Host is preferred with respect to patching.
            weight += CONF.filter_scheduler.swmgmt_patch_weight_multiplier
        if host_state.upgrade_prefer:
            # Host is preferred with respect to upgrades.
            weight += CONF.filter_scheduler.swmgmt_upgrade_weight_multiplier
        return weight
| 27.933333 | 78 | 0.727924 |
from oslo_config import cfg
from nova.scheduler import weights
CONF = cfg.CONF
class SoftwareMgmtWeigher(weights.BaseHostWeigher):
minval = 0
def _weigh_object(self, host_state, weight_properties):
weight = 0.0
if host_state.patch_prefer:
weight += CONF.filter_scheduler.swmgmt_patch_weight_multiplier
if host_state.upgrade_prefer:
weight += CONF.filter_scheduler.swmgmt_upgrade_weight_multiplier
return weight
| true | true |
1c39acce1d04c58907ed9581a977b8853da4ae7c | 3,007 | py | Python | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/show_scaling_group_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/show_scaling_group_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/show_scaling_group_response.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowScalingGroupResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'scaling_group': 'ScalingGroups'
}
attribute_map = {
'scaling_group': 'scaling_group'
}
def __init__(self, scaling_group=None):
"""ShowScalingGroupResponse - a model defined in huaweicloud sdk"""
super(ShowScalingGroupResponse, self).__init__()
self._scaling_group = None
self.discriminator = None
if scaling_group is not None:
self.scaling_group = scaling_group
@property
def scaling_group(self):
"""Gets the scaling_group of this ShowScalingGroupResponse.
:return: The scaling_group of this ShowScalingGroupResponse.
:rtype: ScalingGroups
"""
return self._scaling_group
@scaling_group.setter
def scaling_group(self, scaling_group):
"""Sets the scaling_group of this ShowScalingGroupResponse.
:param scaling_group: The scaling_group of this ShowScalingGroupResponse.
:type: ScalingGroups
"""
self._scaling_group = scaling_group
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowScalingGroupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.587156 | 81 | 0.573994 |
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowScalingGroupResponse(SdkResponse):
sensitive_list = []
openapi_types = {
'scaling_group': 'ScalingGroups'
}
attribute_map = {
'scaling_group': 'scaling_group'
}
def __init__(self, scaling_group=None):
super(ShowScalingGroupResponse, self).__init__()
self._scaling_group = None
self.discriminator = None
if scaling_group is not None:
self.scaling_group = scaling_group
@property
def scaling_group(self):
return self._scaling_group
@scaling_group.setter
def scaling_group(self, scaling_group):
self._scaling_group = scaling_group
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ShowScalingGroupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c39addef70fc81489f275799f6061afc78af289 | 861 | py | Python | submissions/intro-heuristics/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/intro-heuristics/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/intro-heuristics/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | def main():
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from copy import deepcopy
d = int(readline())
c = list(map(int, readline().split()))
s = [list(map(int, readline().split())) for _ in range(d)]
memo = [0] * 26
check = 0
for i in range(d):
ans = 0
tmp = deepcopy(check)
check = -float('inf')
for idx, ss in enumerate(s[i]):
v = tmp + ss
for j, (m, cc) in enumerate(zip(memo, c)):
if j == idx:
continue
v -= cc * (i + 1 - m)
if check < v:
ans = idx + 1
check = v
print(ans)
memo[ans - 1] = i + 1
if __name__ == '__main__':
main()
| 25.323529 | 62 | 0.475029 | def main():
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from copy import deepcopy
d = int(readline())
c = list(map(int, readline().split()))
s = [list(map(int, readline().split())) for _ in range(d)]
memo = [0] * 26
check = 0
for i in range(d):
ans = 0
tmp = deepcopy(check)
check = -float('inf')
for idx, ss in enumerate(s[i]):
v = tmp + ss
for j, (m, cc) in enumerate(zip(memo, c)):
if j == idx:
continue
v -= cc * (i + 1 - m)
if check < v:
ans = idx + 1
check = v
print(ans)
memo[ans - 1] = i + 1
if __name__ == '__main__':
main()
| true | true |
1c39adee80341aa646f0761cff680a6923e9d891 | 1,692 | py | Python | tests/pytests/integration/ssh/test_pillar.py | babs/salt | c536ea716d5308880b244e7980f4b659d86fc104 | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | tests/pytests/integration/ssh/test_pillar.py | babs/salt | c536ea716d5308880b244e7980f4b659d86fc104 | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | tests/pytests/integration/ssh/test_pillar.py | babs/salt | c536ea716d5308880b244e7980f4b659d86fc104 | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | import pytest
pytestmark = [
pytest.mark.skip_on_windows(reason="salt-ssh not available on Windows"),
]
@pytest.fixture(scope="module")
def pillar_tree(base_env_pillar_tree_root_dir):
top_file = """
base:
'localhost':
- basic
'127.0.0.1':
- basic
"""
basic_pillar_file = """
monty: python
knights:
- Lancelot
- Galahad
- Bedevere
- Robin
"""
top_tempfile = pytest.helpers.temp_file(
"top.sls", top_file, base_env_pillar_tree_root_dir
)
basic_tempfile = pytest.helpers.temp_file(
"basic.sls", basic_pillar_file, base_env_pillar_tree_root_dir
)
with top_tempfile, basic_tempfile:
yield
@pytest.mark.slow_test
def test_pillar_items(salt_ssh_cli, pillar_tree):
    """
    Verify pillar.items returns the full rendered pillar over salt-ssh.
    """
    result = salt_ssh_cli.run("pillar.items")
    assert result.exitcode == 0
    items = result.json
    assert items
    assert "monty" in items
    assert items["monty"] == "python"
    assert "knights" in items
    assert items["knights"] == ["Lancelot", "Galahad", "Bedevere", "Robin"]
@pytest.mark.slow_test
def test_pillar_get(salt_ssh_cli, pillar_tree):
    """
    Verify pillar.get returns an existing pillar value over salt-ssh.
    """
    result = salt_ssh_cli.run("pillar.get", "monty")
    assert result.exitcode == 0
    value = result.json
    assert value
    assert value == "python"
@pytest.mark.slow_test
def test_pillar_get_doesnotexist(salt_ssh_cli, pillar_tree):
    """
    Verify pillar.get on a missing key comes back as an empty string.
    """
    result = salt_ssh_cli.run("pillar.get", "doesnotexist")
    assert result.exitcode == 0
    assert result.json == ""
| 24.171429 | 82 | 0.653664 | import pytest
pytestmark = [
pytest.mark.skip_on_windows(reason="salt-ssh not available on Windows"),
]
@pytest.fixture(scope="module")
def pillar_tree(base_env_pillar_tree_root_dir):
top_file = """
base:
'localhost':
- basic
'127.0.0.1':
- basic
"""
basic_pillar_file = """
monty: python
knights:
- Lancelot
- Galahad
- Bedevere
- Robin
"""
top_tempfile = pytest.helpers.temp_file(
"top.sls", top_file, base_env_pillar_tree_root_dir
)
basic_tempfile = pytest.helpers.temp_file(
"basic.sls", basic_pillar_file, base_env_pillar_tree_root_dir
)
with top_tempfile, basic_tempfile:
yield
@pytest.mark.slow_test
def test_pillar_items(salt_ssh_cli, pillar_tree):
ret = salt_ssh_cli.run("pillar.items")
assert ret.exitcode == 0
assert ret.json
pillar_items = ret.json
assert "monty" in pillar_items
assert pillar_items["monty"] == "python"
assert "knights" in pillar_items
assert pillar_items["knights"] == ["Lancelot", "Galahad", "Bedevere", "Robin"]
@pytest.mark.slow_test
def test_pillar_get(salt_ssh_cli, pillar_tree):
ret = salt_ssh_cli.run("pillar.get", "monty")
assert ret.exitcode == 0
assert ret.json
assert ret.json == "python"
@pytest.mark.slow_test
def test_pillar_get_doesnotexist(salt_ssh_cli, pillar_tree):
ret = salt_ssh_cli.run("pillar.get", "doesnotexist")
assert ret.exitcode == 0
assert ret.json == ""
| true | true |
1c39ae3478a2d275b999096699a1f31a4a998cb7 | 9,008 | py | Python | pysnmp/proto/api/v1.py | spruning/pysnmp | 023b707f83bb1805dafdcd38ea5b1adfbf83d7e9 | [
"BSD-2-Clause"
] | null | null | null | pysnmp/proto/api/v1.py | spruning/pysnmp | 023b707f83bb1805dafdcd38ea5b1adfbf83d7e9 | [
"BSD-2-Clause"
] | null | null | null | pysnmp/proto/api/v1.py | spruning/pysnmp | 023b707f83bb1805dafdcd38ea5b1adfbf83d7e9 | [
"BSD-2-Clause"
] | null | null | null | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
from pyasn1.type import univ
from pysnmp.proto import rfc1155, rfc1157, error
from pysnmp import nextid
# Shortcuts to SNMP types
# Base ASN.1 types (pyasn1).
Integer = univ.Integer
OctetString = univ.OctetString
Null = univ.Null
null = Null('')  # shared Null instance used wherever a value is absent
ObjectIdentifier = univ.ObjectIdentifier

# SMIv1 application-wide types (RFC 1155).
IpAddress = rfc1155.IpAddress
NetworkAddress = rfc1155.NetworkAddress
Counter = rfc1155.Counter
Gauge = rfc1155.Gauge
TimeTicks = rfc1155.TimeTicks
Opaque = rfc1155.Opaque

# Protocol data units and message container (RFC 1157).
VarBind = rfc1157.VarBind
VarBindList = rfc1157.VarBindList
GetRequestPDU = rfc1157.GetRequestPDU
GetNextRequestPDU = rfc1157.GetNextRequestPDU
GetResponsePDU = rfc1157.GetResponsePDU
SetRequestPDU = rfc1157.SetRequestPDU
TrapPDU = rfc1157.TrapPDU
Message = rfc1157.Message
class VarBindAPI(object):
    """Helpers for packing/unpacking (OID, value) pairs into VarBind objects."""

    @staticmethod
    def setOIDVal(varBind, oidVal):
        """Store *oidVal* = (oid, value) into *varBind*; None becomes Null."""
        (oid, val) = oidVal
        varBind.setComponentByPosition(0, oid)
        if val is None:
            val = null
        # setComponentByPosition(1) initializes the value slot and returns the
        # VarBind itself; the inner component is then selected by the value's
        # tag so the right CHOICE alternative is populated.
        varBind.setComponentByPosition(1).getComponentByPosition(1).setComponentByType(val.getTagSet(), val, 1,
                                                                                      verifyConstraints=False)
        return varBind

    @staticmethod
    def getOIDVal(varBind):
        """Return (oid, value); getComponent(1) unwraps the inner CHOICE."""
        return varBind[0], varBind[1].getComponent(1)


apiVarBind = VarBindAPI()

# Monotonically increasing request-id generator shared by all request PDUs.
getNextRequestID = nextid.Integer(0xffffff)
class PDUAPI(object):
    """Accessors for SNMPv1 request/response PDU fields (RFC 1157)."""

    _errorStatus = rfc1157.errorStatus.clone(0)
    _errorIndex = Integer(0)

    def setDefaults(self, pdu):
        """Initialize *pdu*: fresh request-id, no error, empty var-binds."""
        pdu.setComponentByPosition(
            0, getNextRequestID(), verifyConstraints=False
        )
        pdu.setComponentByPosition(
            1, self._errorStatus, verifyConstraints=False
        )
        pdu.setComponentByPosition(
            2, self._errorIndex, verifyConstraints=False
        )
        pdu.setComponentByPosition(3)

    @staticmethod
    def getRequestID(pdu):
        return pdu.getComponentByPosition(0)

    @staticmethod
    def setRequestID(pdu, value):
        pdu.setComponentByPosition(0, value)

    @staticmethod
    def getErrorStatus(pdu):
        return pdu.getComponentByPosition(1)

    @staticmethod
    def setErrorStatus(pdu, value):
        pdu.setComponentByPosition(1, value)

    @staticmethod
    def getErrorIndex(pdu, muteErrors=False):
        # error-index is a 1-based index into the var-bind list; anything past
        # the end of the list is malformed and is either clamped to the list
        # length (muteErrors=True) or reported as a protocol error.
        errorIndex = pdu.getComponentByPosition(2)
        if errorIndex > len(pdu[3]):
            if muteErrors:
                return errorIndex.clone(len(pdu[3]))
            raise error.ProtocolError(
                'Error index out of range: %s > %s' % (errorIndex, len(pdu[3]))
            )
        return errorIndex

    @staticmethod
    def setErrorIndex(pdu, value):
        pdu.setComponentByPosition(2, value)

    def setEndOfMibError(self, pdu, errorIndex):
        # SNMPv1 has no endOfMibView exception; error-status 2 (noSuchName
        # in RFC 1157) is used to signal it instead.
        self.setErrorIndex(pdu, errorIndex)
        self.setErrorStatus(pdu, 2)

    def setNoSuchInstanceError(self, pdu, errorIndex):
        # Same on-the-wire representation as end-of-MIB in SNMPv1.
        self.setEndOfMibError(pdu, errorIndex)

    @staticmethod
    def getVarBindList(pdu):
        return pdu.getComponentByPosition(3)

    @staticmethod
    def setVarBindList(pdu, varBindList):
        pdu.setComponentByPosition(3, varBindList)

    @staticmethod
    def getVarBinds(pdu):
        # Unpack every VarBind into a plain (oid, value) pair.
        varBinds = []
        for varBind in pdu.getComponentByPosition(3):
            varBinds.append(apiVarBind.getOIDVal(varBind))
        return varBinds

    @staticmethod
    def setVarBinds(pdu, varBinds):
        # Accepts either ready-made VarBind objects or (oid, value) tuples.
        varBindList = pdu.setComponentByPosition(3).getComponentByPosition(3)
        varBindList.clear()
        idx = 0
        for varBind in varBinds:
            if isinstance(varBind, VarBind):
                varBindList.setComponentByPosition(idx, varBind)
            else:
                varBindList.setComponentByPosition(idx)
                apiVarBind.setOIDVal(
                    varBindList.getComponentByPosition(idx), varBind
                )
            idx += 1

    def getResponse(self, reqPDU):
        """Build a GetResponse PDU mirroring the request-id of *reqPDU*."""
        rspPDU = GetResponsePDU()
        self.setDefaults(rspPDU)
        self.setRequestID(rspPDU, self.getRequestID(reqPDU))
        return rspPDU

    def getVarBindTable(self, reqPDU, rspPDU):
        # On noSuchName (status 2), echo the requested OIDs with Null values
        # so table-walking callers can detect the end of a table.
        if apiPDU.getErrorStatus(rspPDU) == 2:
            varBindRow = []
            for varBind in apiPDU.getVarBinds(reqPDU):
                varBindRow.append((varBind[0], null))
            return [varBindRow]
        else:
            return [apiPDU.getVarBinds(rspPDU)]


apiPDU = PDUAPI()
class TrapPDUAPI(object):
    """Accessors for SNMPv1 Trap-PDU fields (RFC 1157)."""

    # Cached NetworkAddress for agent-addr; resolved lazily in setDefaults().
    _networkAddress = None
    _entOid = ObjectIdentifier((1, 3, 6, 1, 4, 1, 20408))
    _genericTrap = rfc1157.genericTrap.clone('coldStart')
    _zeroInt = univ.Integer(0)
    _zeroTime = TimeTicks(0)

    def setDefaults(self, pdu):
        """Populate *pdu* with default trap fields and an empty var-bind list."""
        if self._networkAddress is None:
            try:
                import socket
                agentAddress = IpAddress(socket.gethostbyname(socket.gethostname()))
            except Exception:
                # Host name resolution can fail (no DNS, odd hosts file);
                # fall back to a null address rather than aborting the trap.
                agentAddress = IpAddress('0.0.0.0')
            # NOTE: assigning through self shadows the class attribute with an
            # instance attribute; apiTrapPDU is a module-level singleton, so
            # the resolved address is effectively cached globally.
            self._networkAddress = NetworkAddress().setComponentByPosition(0, agentAddress)
        pdu.setComponentByPosition(0, self._entOid, verifyConstraints=False)
        pdu.setComponentByPosition(1, self._networkAddress, verifyConstraints=False)
        pdu.setComponentByPosition(2, self._genericTrap, verifyConstraints=False)
        pdu.setComponentByPosition(3, self._zeroInt, verifyConstraints=False)
        pdu.setComponentByPosition(4, self._zeroTime, verifyConstraints=False)
        pdu.setComponentByPosition(5)

    # Field accessors below address the Trap-PDU sequence by position:
    # 0=enterprise, 1=agent-addr, 2=generic-trap, 3=specific-trap,
    # 4=time-stamp, 5=variable-bindings.

    @staticmethod
    def getEnterprise(pdu):
        return pdu.getComponentByPosition(0)

    @staticmethod
    def setEnterprise(pdu, value):
        pdu.setComponentByPosition(0, value)

    @staticmethod
    def getAgentAddr(pdu):
        # agent-addr is a NetworkAddress wrapper; unwrap the inner IpAddress.
        return pdu.getComponentByPosition(1).getComponentByPosition(0)

    @staticmethod
    def setAgentAddr(pdu, value):
        pdu.setComponentByPosition(1).getComponentByPosition(1).setComponentByPosition(0, value)

    @staticmethod
    def getGenericTrap(pdu):
        return pdu.getComponentByPosition(2)

    @staticmethod
    def setGenericTrap(pdu, value):
        pdu.setComponentByPosition(2, value)

    @staticmethod
    def getSpecificTrap(pdu):
        return pdu.getComponentByPosition(3)

    @staticmethod
    def setSpecificTrap(pdu, value):
        pdu.setComponentByPosition(3, value)

    @staticmethod
    def getTimeStamp(pdu):
        return pdu.getComponentByPosition(4)

    @staticmethod
    def setTimeStamp(pdu, value):
        pdu.setComponentByPosition(4, value)

    @staticmethod
    def getVarBindList(pdu):
        return pdu.getComponentByPosition(5)

    @staticmethod
    def setVarBindList(pdu, varBindList):
        pdu.setComponentByPosition(5, varBindList)

    @staticmethod
    def getVarBinds(pdu):
        # Unpack every VarBind into a plain (oid, value) pair.
        varBinds = []
        for varBind in pdu.getComponentByPosition(5):
            varBinds.append(apiVarBind.getOIDVal(varBind))
        return varBinds

    @staticmethod
    def setVarBinds(pdu, varBinds):
        # Accepts either ready-made VarBind objects or (oid, value) tuples.
        varBindList = pdu.setComponentByPosition(5).getComponentByPosition(5)
        varBindList.clear()
        idx = 0
        for varBind in varBinds:
            if isinstance(varBind, VarBind):
                varBindList.setComponentByPosition(idx, varBind)
            else:
                varBindList.setComponentByPosition(idx)
                apiVarBind.setOIDVal(
                    varBindList.getComponentByPosition(idx), varBind
                )
            idx += 1


apiTrapPDU = TrapPDUAPI()
class MessageAPI(object):
    """Accessors for the SNMPv1 Message wrapper (version, community, PDU)."""

    _version = rfc1157.version.clone(0)
    _community = univ.OctetString('public')

    def setDefaults(self, msg):
        """Initialize *msg* with version 0 (SNMPv1) and community 'public'."""
        msg.setComponentByPosition(0, self._version, verifyConstraints=False)
        msg.setComponentByPosition(1, self._community, verifyConstraints=False)
        return msg

    @staticmethod
    def getVersion(msg):
        return msg.getComponentByPosition(0)

    @staticmethod
    def setVersion(msg, value):
        msg.setComponentByPosition(0, value)

    @staticmethod
    def getCommunity(msg):
        return msg.getComponentByPosition(1)

    @staticmethod
    def setCommunity(msg, value):
        msg.setComponentByPosition(1, value)

    @staticmethod
    def getPDU(msg):
        # Position 2 is a CHOICE over the PDU types; unwrap the selected one.
        return msg.getComponentByPosition(2).getComponent()

    @staticmethod
    def setPDU(msg, value):
        # Select the CHOICE alternative matching the PDU's tag.
        msg.setComponentByPosition(2).getComponentByPosition(2).setComponentByType(value.getTagSet(), value, 1,
                                                                                  verifyConstraints=False)

    def getResponse(self, reqMsg):
        """Build a response Message echoing version/community of *reqMsg*."""
        rspMsg = Message()
        self.setDefaults(rspMsg)
        self.setVersion(rspMsg, self.getVersion(reqMsg))
        self.setCommunity(rspMsg, self.getCommunity(reqMsg))
        self.setPDU(rspMsg, apiPDU.getResponse(self.getPDU(reqMsg)))
        return rspMsg


apiMessage = MessageAPI()
| 30.12709 | 111 | 0.658748 |
from pyasn1.type import univ
from pysnmp.proto import rfc1155, rfc1157, error
from pysnmp import nextid
Integer = univ.Integer
OctetString = univ.OctetString
Null = univ.Null
null = Null('')
ObjectIdentifier = univ.ObjectIdentifier
IpAddress = rfc1155.IpAddress
NetworkAddress = rfc1155.NetworkAddress
Counter = rfc1155.Counter
Gauge = rfc1155.Gauge
TimeTicks = rfc1155.TimeTicks
Opaque = rfc1155.Opaque
VarBind = rfc1157.VarBind
VarBindList = rfc1157.VarBindList
GetRequestPDU = rfc1157.GetRequestPDU
GetNextRequestPDU = rfc1157.GetNextRequestPDU
GetResponsePDU = rfc1157.GetResponsePDU
SetRequestPDU = rfc1157.SetRequestPDU
TrapPDU = rfc1157.TrapPDU
Message = rfc1157.Message
class VarBindAPI(object):
@staticmethod
def setOIDVal(varBind, oidVal):
(oid, val) = oidVal
varBind.setComponentByPosition(0, oid)
if val is None:
val = null
varBind.setComponentByPosition(1).getComponentByPosition(1).setComponentByType(val.getTagSet(), val, 1,
verifyConstraints=False)
return varBind
@staticmethod
def getOIDVal(varBind):
return varBind[0], varBind[1].getComponent(1)
apiVarBind = VarBindAPI()
getNextRequestID = nextid.Integer(0xffffff)
class PDUAPI(object):
_errorStatus = rfc1157.errorStatus.clone(0)
_errorIndex = Integer(0)
def setDefaults(self, pdu):
pdu.setComponentByPosition(
0, getNextRequestID(), verifyConstraints=False
)
pdu.setComponentByPosition(
1, self._errorStatus, verifyConstraints=False
)
pdu.setComponentByPosition(
2, self._errorIndex, verifyConstraints=False
)
pdu.setComponentByPosition(3)
@staticmethod
def getRequestID(pdu):
return pdu.getComponentByPosition(0)
@staticmethod
def setRequestID(pdu, value):
pdu.setComponentByPosition(0, value)
@staticmethod
def getErrorStatus(pdu):
return pdu.getComponentByPosition(1)
@staticmethod
def setErrorStatus(pdu, value):
pdu.setComponentByPosition(1, value)
@staticmethod
def getErrorIndex(pdu, muteErrors=False):
errorIndex = pdu.getComponentByPosition(2)
if errorIndex > len(pdu[3]):
if muteErrors:
return errorIndex.clone(len(pdu[3]))
raise error.ProtocolError(
'Error index out of range: %s > %s' % (errorIndex, len(pdu[3]))
)
return errorIndex
@staticmethod
def setErrorIndex(pdu, value):
pdu.setComponentByPosition(2, value)
def setEndOfMibError(self, pdu, errorIndex):
self.setErrorIndex(pdu, errorIndex)
self.setErrorStatus(pdu, 2)
def setNoSuchInstanceError(self, pdu, errorIndex):
self.setEndOfMibError(pdu, errorIndex)
@staticmethod
def getVarBindList(pdu):
return pdu.getComponentByPosition(3)
@staticmethod
def setVarBindList(pdu, varBindList):
pdu.setComponentByPosition(3, varBindList)
@staticmethod
def getVarBinds(pdu):
varBinds = []
for varBind in pdu.getComponentByPosition(3):
varBinds.append(apiVarBind.getOIDVal(varBind))
return varBinds
@staticmethod
def setVarBinds(pdu, varBinds):
varBindList = pdu.setComponentByPosition(3).getComponentByPosition(3)
varBindList.clear()
idx = 0
for varBind in varBinds:
if isinstance(varBind, VarBind):
varBindList.setComponentByPosition(idx, varBind)
else:
varBindList.setComponentByPosition(idx)
apiVarBind.setOIDVal(
varBindList.getComponentByPosition(idx), varBind
)
idx += 1
def getResponse(self, reqPDU):
rspPDU = GetResponsePDU()
self.setDefaults(rspPDU)
self.setRequestID(rspPDU, self.getRequestID(reqPDU))
return rspPDU
def getVarBindTable(self, reqPDU, rspPDU):
if apiPDU.getErrorStatus(rspPDU) == 2:
varBindRow = []
for varBind in apiPDU.getVarBinds(reqPDU):
varBindRow.append((varBind[0], null))
return [varBindRow]
else:
return [apiPDU.getVarBinds(rspPDU)]
apiPDU = PDUAPI()
class TrapPDUAPI(object):
_networkAddress = None
_entOid = ObjectIdentifier((1, 3, 6, 1, 4, 1, 20408))
_genericTrap = rfc1157.genericTrap.clone('coldStart')
_zeroInt = univ.Integer(0)
_zeroTime = TimeTicks(0)
def setDefaults(self, pdu):
if self._networkAddress is None:
try:
import socket
agentAddress = IpAddress(socket.gethostbyname(socket.gethostname()))
except Exception:
agentAddress = IpAddress('0.0.0.0')
self._networkAddress = NetworkAddress().setComponentByPosition(0, agentAddress)
pdu.setComponentByPosition(0, self._entOid, verifyConstraints=False)
pdu.setComponentByPosition(1, self._networkAddress, verifyConstraints=False)
pdu.setComponentByPosition(2, self._genericTrap, verifyConstraints=False)
pdu.setComponentByPosition(3, self._zeroInt, verifyConstraints=False)
pdu.setComponentByPosition(4, self._zeroTime, verifyConstraints=False)
pdu.setComponentByPosition(5)
@staticmethod
def getEnterprise(pdu):
return pdu.getComponentByPosition(0)
@staticmethod
def setEnterprise(pdu, value):
pdu.setComponentByPosition(0, value)
@staticmethod
def getAgentAddr(pdu):
return pdu.getComponentByPosition(1).getComponentByPosition(0)
@staticmethod
def setAgentAddr(pdu, value):
pdu.setComponentByPosition(1).getComponentByPosition(1).setComponentByPosition(0, value)
@staticmethod
def getGenericTrap(pdu):
return pdu.getComponentByPosition(2)
@staticmethod
def setGenericTrap(pdu, value):
pdu.setComponentByPosition(2, value)
@staticmethod
def getSpecificTrap(pdu):
return pdu.getComponentByPosition(3)
@staticmethod
def setSpecificTrap(pdu, value):
pdu.setComponentByPosition(3, value)
@staticmethod
def getTimeStamp(pdu):
return pdu.getComponentByPosition(4)
@staticmethod
def setTimeStamp(pdu, value):
pdu.setComponentByPosition(4, value)
@staticmethod
def getVarBindList(pdu):
return pdu.getComponentByPosition(5)
@staticmethod
def setVarBindList(pdu, varBindList):
pdu.setComponentByPosition(5, varBindList)
@staticmethod
def getVarBinds(pdu):
varBinds = []
for varBind in pdu.getComponentByPosition(5):
varBinds.append(apiVarBind.getOIDVal(varBind))
return varBinds
@staticmethod
def setVarBinds(pdu, varBinds):
varBindList = pdu.setComponentByPosition(5).getComponentByPosition(5)
varBindList.clear()
idx = 0
for varBind in varBinds:
if isinstance(varBind, VarBind):
varBindList.setComponentByPosition(idx, varBind)
else:
varBindList.setComponentByPosition(idx)
apiVarBind.setOIDVal(
varBindList.getComponentByPosition(idx), varBind
)
idx += 1
apiTrapPDU = TrapPDUAPI()
class MessageAPI(object):
_version = rfc1157.version.clone(0)
_community = univ.OctetString('public')
def setDefaults(self, msg):
msg.setComponentByPosition(0, self._version, verifyConstraints=False)
msg.setComponentByPosition(1, self._community, verifyConstraints=False)
return msg
@staticmethod
def getVersion(msg):
return msg.getComponentByPosition(0)
@staticmethod
def setVersion(msg, value):
msg.setComponentByPosition(0, value)
@staticmethod
def getCommunity(msg):
return msg.getComponentByPosition(1)
@staticmethod
def setCommunity(msg, value):
msg.setComponentByPosition(1, value)
@staticmethod
def getPDU(msg):
return msg.getComponentByPosition(2).getComponent()
@staticmethod
def setPDU(msg, value):
msg.setComponentByPosition(2).getComponentByPosition(2).setComponentByType(value.getTagSet(), value, 1,
verifyConstraints=False)
def getResponse(self, reqMsg):
rspMsg = Message()
self.setDefaults(rspMsg)
self.setVersion(rspMsg, self.getVersion(reqMsg))
self.setCommunity(rspMsg, self.getCommunity(reqMsg))
self.setPDU(rspMsg, apiPDU.getResponse(self.getPDU(reqMsg)))
return rspMsg
apiMessage = MessageAPI()
| true | true |
1c39aeec153edc1e6962e2b55f5af78f7231ccd0 | 289 | py | Python | C+= language/c+=.py | rzhvn1/Homework-Ogogo | e7e5e48cf144f559a739a1acc7b19373034cda78 | [
"MIT"
] | null | null | null | C+= language/c+=.py | rzhvn1/Homework-Ogogo | e7e5e48cf144f559a739a1acc7b19373034cda78 | [
"MIT"
] | null | null | null | C+= language/c+=.py | rzhvn1/Homework-Ogogo | e7e5e48cf144f559a739a1acc7b19373034cda78 | [
"MIT"
] | null | null | null | num = int(input())
def count_steps(a, b, n):
    """Return how many "smaller += other" additions make max(a, b) exceed n.

    Each step adds the current larger value into the smaller one (the
    classic "C+=" problem); the loop runs while max(a, b) is still <= n.
    """
    count = 0
    while max(a, b) <= n:  # same condition as the original max(a, b) < n + 1
        if a < b:
            a += b
        else:
            b += a
        count += 1
    return count


def main():
    """Read the number of test cases, then one "a b n" line per case."""
    num = int(input())
    for _ in range(num):
        parts = list(map(int, input().split()))
        # Only the first three integers on the line are used, as before.
        print(count_steps(parts[0], parts[1], parts[2]))


# Guarding the entry point makes the module importable (and testable)
# without blocking on stdin; running it as a script behaves as before.
if __name__ == '__main__':
    main()
| 17 | 41 | 0.387543 | num = int(input())
i = 0
while i < num:
count = 0
lst1 = list(map(int,input().split()))
a = lst1[0]
b = lst1[1]
n = lst1[2]
i += 1
while max(a,b) < n + 1:
if a < b:
a += b
else:
b += a
count += 1
print(count)
| true | true |
1c39af69b80fed61354fb436e8fd71bdcf683c24 | 384 | py | Python | change.py | dongchirua/algorithms-101 | 4d25254e00c8003c6bba8d23dda6819771c6dec6 | [
"MIT"
] | null | null | null | change.py | dongchirua/algorithms-101 | 4d25254e00c8003c6bba8d23dda6819771c6dec6 | [
"MIT"
] | null | null | null | change.py | dongchirua/algorithms-101 | 4d25254e00c8003c6bba8d23dda6819771c6dec6 | [
"MIT"
] | null | null | null | # python2
import sys
def get_change(m, denominations=(1, 5, 10)):
    """Return the number of coins needed to change amount *m* greedily.

    Coins are consumed largest-first (the original iterated the sequence
    back to front); for the canonical 1/5/10 system greedy is optimal.

    Bug fix: the original used a mutable default list and destructively
    ``pop()``-ed from it, so a second call saw an empty denomination list
    and returned 0.  The default is now an immutable tuple and a
    caller-supplied sequence is never mutated.
    """
    result = 0
    need = m
    for d in reversed(denominations):  # largest coin first for 1/5/10
        count, need = divmod(need, d)
        result += count
    return result
# Script entry point: read the amount from stdin and print the coin count.
if __name__ == '__main__':
    n = int(sys.stdin.readline())
    print(get_change(n))
| 18.285714 | 44 | 0.544271 |
import sys
def get_change(m, denominations=[1, 5, 10]):
result = 0
need = m
while len(denominations):
_d = denominations.pop()
ex = need // _d
if ex:
result += ex
need -= _d * ex
return result
if __name__ == '__main__':
n = int(sys.stdin.readline())
print(get_change(n))
| true | true |
1c39b01bd0876f6004d3849aa2e645317e05b7c5 | 1,089 | py | Python | trading_strategies/01_线性回归.py | fadeawaylove/stock-trade-system | 133762e6459745fc6c818b43729c1ffff5b9c5ad | [
"Apache-2.0"
] | 3 | 2020-11-10T03:35:05.000Z | 2021-07-04T15:18:44.000Z | trading_strategies/01_线性回归.py | fadeawaylove/stock-trade-system | 133762e6459745fc6c818b43729c1ffff5b9c5ad | [
"Apache-2.0"
] | null | null | null | trading_strategies/01_线性回归.py | fadeawaylove/stock-trade-system | 133762e6459745fc6c818b43729c1ffff5b9c5ad | [
"Apache-2.0"
] | null | null | null | import pandas_datareader.data as web
import pandas as pd
import numpy as np
import datetime
import statsmodels.api as sm
from statsmodels import regression
import matplotlib.pyplot as plt
"""
Statsmodels是Python中一个强大的统计分析包,包含了回归分析、时间序列分析、假设检验等等的功能
"""
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so Chinese labels render
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with that font

# 1. Fetch daily quotes for 600797.SS from Yahoo Finance.
df_stockload = web.DataReader("600797.SS", "yahoo", datetime.datetime(2019, 10, 1), datetime.datetime(2020, 4, 1))
df_stockload.fillna(method="bfill", inplace=True)  # back-fill NaNs with the next valid value
print(df_stockload.info)  # NOTE(review): prints the bound method; .info() was probably intended

# 2. Fit an OLS line: close = k * day_index + b.
y_arr = df_stockload.Close.values
x_arr = np.arange(0, len(y_arr))
x_b_arr = sm.add_constant(x_arr)  # prepend a constant column for the intercept
model = regression.linear_model.OLS(y_arr, x_b_arr).fit()  # ordinary least squares fit
rad = model.params[1]  # y = kx + b : params[1] = k
intercept = model.params[0]
reg_y_fit = x_arr * rad + intercept
print(model.params)
# NOTE(review): the slope is not an angle in radians, so rad2deg here is only
# a rough "trend angle"; np.rad2deg(np.arctan(rad)) may have been intended.
print(np.rad2deg(rad))

# 3. Plot the close prices against the fitted regression line.
plt.plot(x_arr, y_arr)
plt.plot(x_arr, reg_y_fit, 'r')
plt.title(f"浙大网新 y = {rad} * x + {intercept}")
plt.legend(['close', 'linear'], loc='best')
plt.show()
| 25.325581 | 114 | 0.728191 | import pandas_datareader.data as web
import pandas as pd
import numpy as np
import datetime
import statsmodels.api as sm
from statsmodels import regression
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
df_stockload = web.DataReader("600797.SS", "yahoo", datetime.datetime(2019, 10, 1), datetime.datetime(2020, 4, 1))
df_stockload.fillna(method="bfill", inplace=True)
print(df_stockload.info)
y_arr = df_stockload.Close.values
x_arr = np.arange(0, len(y_arr))
x_b_arr = sm.add_constant(x_arr)
model = regression.linear_model.OLS(y_arr, x_b_arr).fit()
rad = model.params[1]
intercept = model.params[0]
reg_y_fit = x_arr * rad + intercept
print(model.params)
print(np.rad2deg(rad))
plt.plot(x_arr, y_arr)
plt.plot(x_arr, reg_y_fit, 'r')
plt.title(f"浙大网新 y = {rad} * x + {intercept}")
plt.legend(['close', 'linear'], loc='best')
plt.show()
| true | true |
1c39b121755a24fef045903cf06f39dea3bdc9bd | 2,499 | py | Python | lib/googlecloudsdk/core/console/style/text.py | kylewuolle/google-cloud-sdk | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/core/console/style/text.py | kylewuolle/google-cloud-sdk | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/core/console/style/text.py | kylewuolle/google-cloud-sdk | 75f09ebe779e99fdc3fd13b48621fe12bfaa11aa | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- #
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic text objects that are used for styled outputting."""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import enum
class TextAttributes(object):
  """Bundle of attributes describing how a piece of text is styled."""

  def __init__(self, format_str=None, color=None, attrs=None):
    """Creates the attribute bundle.

    Args:
      format_str: (str), template the text is interpolated into. For
        example '[{}]' wraps the text in brackets.
      color: (Colors), the color the text should be rendered with.
      attrs: (Attrs), additional attributes to apply to the text.
    """
    self._format_str = format_str
    self._color = color
    if attrs:
      self._attrs = attrs
    else:
      self._attrs = []

  @property
  def attrs(self):
    return self._attrs

  @property
  def color(self):
    return self._color

  @property
  def format_str(self):
    return self._format_str
class TypedText(object):
  """A list of text snippets tagged with a semantic type used for styling."""

  def __init__(self, texts, text_type=None):
    """Creates typed text.

    Args:
      texts: (list[str]), strs or TypedText objects that should all be
        styled according to text_type.
      text_type: (TextTypes), the semantic type controlling how the text
        is rendered.
    """
    self.text_type = text_type
    self.texts = texts
class _TextTypes(enum.Enum):
  """Base enum for text types; each member is a callable TypedText factory."""

  def __call__(self, *args):
    """Wraps the given pieces in a TypedText tagged with this member."""
    pieces = list(args)
    return TypedText(pieces, self)
# TODO(b/113525269): Add more types.
class TextTypes(_TextTypes):
  """Defines text types that can be used for styling text.

  Each member is callable (see _TextTypes.__call__) and wraps its
  arguments in a TypedText carrying that semantic type.
  """

  RESOURCE_NAME = 1
  URL = 2
  USER_INPUT = 3
  COMMAND = 4
  INFO = 5
  URI = 6
  OUTPUT = 7
| 28.078652 | 74 | 0.70108 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import enum
class TextAttributes(object):
def __init__(self, format_str=None, color=None, attrs=None):
self._format_str = format_str
self._color = color
self._attrs = attrs or []
@property
def format_str(self):
return self._format_str
@property
def color(self):
return self._color
@property
def attrs(self):
return self._attrs
class TypedText(object):
def __init__(self, texts, text_type=None):
self.texts = texts
self.text_type = text_type
class _TextTypes(enum.Enum):
def __call__(self, *args):
return TypedText(list(args), self)
class TextTypes(_TextTypes):
RESOURCE_NAME = 1
URL = 2
USER_INPUT = 3
COMMAND = 4
INFO = 5
URI = 6
OUTPUT = 7
| true | true |
1c39b15409ff79a595f4ad11aa536b39a5c87464 | 18,652 | py | Python | summit/strategies/entmoot.py | dswigh/summit | a1cecdd41df8119005173b46ac45fb22472628d6 | [
"MIT"
] | 60 | 2020-09-10T00:00:03.000Z | 2022-03-08T10:45:02.000Z | summit/strategies/entmoot.py | dswigh/summit | a1cecdd41df8119005173b46ac45fb22472628d6 | [
"MIT"
] | 57 | 2020-09-07T11:06:15.000Z | 2022-02-16T16:30:48.000Z | summit/strategies/entmoot.py | dswigh/summit | a1cecdd41df8119005173b46ac45fb22472628d6 | [
"MIT"
] | 12 | 2020-09-07T12:43:19.000Z | 2022-02-26T09:58:01.000Z | from summit.strategies.base import Strategy
from summit.domain import *
from summit.utils.dataset import DataSet
import string
import numpy as np
import pandas as pd
class ENTMOOT(Strategy):
"""
Single-objective Bayesian optimization, using gradient-boosted trees
instead of Gaussian processes, via ENTMOOT (ENsemble Tree MOdel Optimization Tool)
This is currently an experimental feature and requires Gurobipy to be installed.
Parameters
----------
domain: :class:`~summit.domain.Domain`
The Summit domain describing the optimization problem.
transform : :class:`~summit.strategies.base.Transform`, optional
A transform object. By default no transformation will be done
on the input variables or objectives.
estimator_type: string, optional
The ENTMOOT base_estimator type.
By default, Gradient-Boosted Regression
std_estimator_type: string, optional
The ENTMOOT std_estimator
By default, bounded data distance
acquisition_type: string, optional
The acquisition function type from ENTMOOT. See notes for options.
By default, Lower Confidence Bound.
optimizer_type: string, optional
The optimizer used in ENTMOOT for maximization of the acquisition function.
By default, sampling will be used.
generator_type: string, optional
The method for generating initial points before a model can be trained.
By default, uniform random points will be used.
initial_points: int, optional
How many points to require before training models
min_child_samples: int, optional
Minimum size of a leaf in tree models
Examples
--------
>>> from summit.domain import *
>>> from summit.strategies.entmoot import ENTMOOT
>>> import numpy as np
>>> domain = Domain()
>>> domain += ContinuousVariable(name='temperature', description='reaction temperature in celsius', bounds=[50, 100])
>>> domain += CategoricalVariable(name='flowrate_a', description='flow of reactant a in mL/min', levels=[1,2,3,4,5])
>>> domain += ContinuousVariable(name='flowrate_b', description='flow of reactant b in mL/min', bounds=[0.1, 0.5])
>>> domain += ContinuousVariable(name="yld", description='yield of reaction', bounds=[0,100], is_objective=True)
>>> # strategy = ENTMOOT(domain)
>>> # next_experiments = strategy.suggest_experiments(5)
Notes
----------
Estimator type can either by GBRT (Gradient-boosted regression trees) or RF (random forest from scikit-learn).
Acquisition function type can only be LCB (lower confidence bound).
Based on the paper from [Thebelt]_ et al.
.. [Thebelt] A. Thebelt et al.
"ENTMOOT: A Framework for Optimization over Ensemble Tree Models", `ArXiv <https://arxiv.org/abs/2003.04774>`_
"""
def __init__(
self,
domain,
transform=None,
estimator_type=None,
std_estimator_type=None,
acquisition_type=None,
optimizer_type=None,
generator_type=None,
initial_points=50,
min_child_samples=5,
**kwargs
):
Strategy.__init__(self, domain, transform=transform, **kwargs)
self.use_descriptors = kwargs.get("use_descriptors", False)
# TODO: notation - discrete in our model (e.g., catalyst type) = categorical?
self.input_domain = []
for v in self.domain.variables:
if not v.is_objective:
if isinstance(v, ContinuousVariable):
self.input_domain.append(
{
"name": v.name,
"type": v.variable_type,
"domain": (v.bounds[0], v.bounds[1]),
}
)
elif isinstance(v, CategoricalVariable):
raise ValueError(
"Categorical Variables are not yet implemented "
"for ENTMOOT strategy."
)
if not self.use_descriptors:
self.input_domain.append(
{
"name": v.name,
"type": "categorical",
"domain": tuple(self.categorical_wrapper(v.levels)),
}
)
elif v.ds is not None and self.use_descriptors:
if v.ds is None:
raise ValueError(
"No descriptors provided for variable: {}".format(
v.name
)
)
descriptor_names = v.ds.data_columns
descriptors = np.asarray(
[
v.ds.loc[:, [l]].values.tolist()
for l in v.ds.data_columns
]
)
for j, d in enumerate(descriptors):
self.input_domain.append(
{
"name": descriptor_names[j],
"type": "continuous",
"domain": (
np.min(np.asarray(d)),
np.max(np.asarray(d)),
),
}
)
elif v.ds is None and self.use_descriptors:
raise ValueError(
"Cannot use descriptors because none are provided."
)
# TODO: GPyOpt currently does not support mixed-domains w/ bandit inputs, there is a PR for this though
else:
raise TypeError("Unknown variable type.")
# TODO: how to handle equality constraints? Could we remove '==' from constraint types as each equality
# constraint reduces the degrees of freedom?
if len(self.domain.constraints) != 0:
self.constraints = self.constr_wrapper(self.domain)
else:
self.constraints = None
self.input_dim = len(self.domain.input_variables)
if estimator_type in [
"GBRT",
"RF",
]:
self.estimator_type = estimator_type
else:
self.estimator_type = "GBRT" # default model type is GB trees
if std_estimator_type in [
"BDD",
"L1BDD",
"DDP",
"L1DDP",
]:
self.std_estimator_type = std_estimator_type
else:
self.std_estimator_type = (
"BDD" # default model type is bounded data distance
)
if acquisition_type in [
"LCB",
]:
self.acquisition_type = acquisition_type
else:
self.acquisition_type = (
"LCB" # default acquisition function is lower confidence bound
)
"""
Method for optimization of acquisition function
sampling: optimized by computing `acquisition_type` at `n_points`
randomly sampled points
global: optimized by using global solver to find minimum of
`acquisition_type`. Requires gurobipy
"""
if optimizer_type in ["sampling", "global"]:
self.optimizer_type = optimizer_type
else:
self.optimizer_type = "sampling" # default optimizer: sampling
if (self.optimizer_type == "sampling") & (self.constraints is not None):
raise ValueError(
"Constraints can only be applied when ENTMOOT is using"
"global solver. Set optimizer_type = global or remove"
"constraints."
)
import pkg_resources
required = {"gurobipy"}
installed = {pkg.key for pkg in pkg_resources.working_set}
self.gurobi_missing = required - installed
"""
Sets an initial points generator. Can be either
- "random" for uniform random numbers,
- "sobol" for a Sobol sequence,
- "halton" for a Halton sequence,
- "hammersly" for a Hammersly sequence,
- "lhs" for a latin hypercube sequence,
- "grid" for a uniform grid sequence
"""
if generator_type in [
"random",
"sobol",
"halton",
"hammersly",
"lhs",
"grid",
]:
self.generator_type = generator_type
else:
self.generator_type = "random"
self.initial_points = initial_points
self.min_child_samples = min_child_samples
self.prev_param = None
    def suggest_experiments(
        self, num_experiments=1, prev_res: DataSet = None, **kwargs
    ):
        """Suggest experiments using ENTMOOT tree-based Bayesian Optimization
        Parameters
        ----------
        num_experiments: int, optional
            The number of experiments (i.e., samples) to generate. Default is 1.
        prev_res: :class:`~summit.utils.data.DataSet`, optional
            Dataset with data from previous experiments of previous iteration.
            If no data is passed, then random sampling will
            be used to suggest an initial design.
        Returns
        -------
        next_experiments : :class:`~summit.utils.data.DataSet`
            A Dataset object with the suggested experiments
        """
        # Imported lazily so summit can be used without entmoot installed.
        from entmoot.optimizer.optimizer import Optimizer
        from entmoot.space.space import Space
        param = None
        xbest = np.zeros(self.domain.num_continuous_dimensions())
        # Everything below is a minimization problem; objective_dir flips the
        # sign of the reported best value back for maximization objectives.
        obj = self.domain.output_variables[0]
        objective_dir = -1.0 if obj.maximize else 1.0
        fbest = float("inf")
        bounds = [k["domain"] for k in self.input_domain]
        space = Space(bounds)
        if not self.gurobi_missing:
            from gurobipy import LinExpr
            from entmoot.optimizer.gurobi_utils import get_core_gurobi_model
            # Encode the linear domain constraints (as produced by
            # constr_wrapper: [coefficients, constant, type]) into the gurobi
            # core model used by entmoot's global acquisition optimizer.
            core_model = get_core_gurobi_model(space)
            gvars = core_model.getVars()
            for c in self.constraints:
                left = LinExpr()
                left.addTerms(c[0], gvars)
                left.addConstant(c[1])
                core_model.addLConstr(left, c[2], 0)
            core_model.update()
            acq_optimizer_kwargs = {"add_model_core": core_model}
        else:
            acq_optimizer_kwargs = None
        entmoot_model = Optimizer(
            dimensions=bounds,
            base_estimator=self.estimator_type,
            std_estimator=self.std_estimator_type,
            n_initial_points=self.initial_points,
            initial_point_generator=self.generator_type,
            acq_func=self.acquisition_type,
            acq_optimizer=self.optimizer_type,
            random_state=None,
            acq_func_kwargs=None,
            acq_optimizer_kwargs=acq_optimizer_kwargs,
            base_estimator_kwargs={"min_child_samples": self.min_child_samples},
            std_estimator_kwargs=None,
            model_queue_size=None,
            verbose=False,
        )
        # If we have previous results:
        if prev_res is not None:
            # Get inputs and outputs
            inputs, outputs = self.transform.transform_inputs_outputs(
                prev_res, transform_descriptors=self.use_descriptors
            )
            # Set up maximization and minimization by converting maximization to minimization problem
            for v in self.domain.variables:
                if v.is_objective and v.maximize:
                    outputs[v.name] = -1 * outputs[v.name]
                if isinstance(v, CategoricalVariable):
                    if not self.use_descriptors:
                        inputs[v.name] = self.categorical_wrapper(
                            inputs[v.name], v.levels
                        )
            inputs = inputs.to_numpy()
            outputs = outputs.to_numpy()
            # Accumulate the history across calls so the tree model is
            # retrained on every observation seen so far.
            if self.prev_param is not None:
                X_step = self.prev_param[0]
                Y_step = self.prev_param[1]
                X_step = np.vstack((X_step, inputs))
                Y_step = np.vstack((Y_step, outputs))
            else:
                X_step = inputs
                Y_step = outputs
            # Convert to list form to give to optimizer
            prev_X = [list(x) for x in X_step]
            prev_y = [y for x in Y_step for y in x]
            # Train entmoot model
            entmoot_model.tell(prev_X, prev_y, fit=True)
            # Store parameters (history of suggested points and function evaluations)
            param = [X_step, Y_step]
            fbest = np.min(Y_step)
            xbest = X_step[np.argmin(Y_step)]
        # "cl_mean" (constant liar) lets the sequential model propose a batch.
        request = np.array(
            entmoot_model.ask(n_points=num_experiments, strategy="cl_mean")
        )
        # Generate DataSet object with variable values of next
        next_experiments = None
        # NOTE(review): transform_descriptors is assigned below but never read;
        # un_transform is called with self.use_descriptors instead — confirm
        # which flag was intended before removing it.
        transform_descriptors = False
        if request is not None and len(request) != 0:
            next_experiments = {}
            i_inp = 0
            for v in self.domain.variables:
                if not v.is_objective:
                    if isinstance(v, CategoricalVariable):
                        if v.ds is None or not self.use_descriptors:
                            cat_list = []
                            for j, entry in enumerate(request[:, i_inp]):
                                cat_list.append(
                                    self.categorical_unwrap(entry, v.levels)
                                )
                            next_experiments[v.name] = np.asarray(cat_list)
                            i_inp += 1
                        else:
                            descriptor_names = v.ds.data_columns
                            for d in descriptor_names:
                                next_experiments[d] = request[:, i_inp]
                                i_inp += 1
                            transform_descriptors = True
                    else:
                        next_experiments[v.name] = request[:, i_inp]
                        i_inp += 1
            next_experiments = DataSet.from_df(pd.DataFrame(data=next_experiments))
            next_experiments[("strategy", "METADATA")] = "ENTMOOT"
        self.fbest = objective_dir * fbest
        self.xbest = xbest
        self.prev_param = param
        # Do any necessary transformation back
        next_experiments = self.transform.un_transform(
            next_experiments, transform_descriptors=self.use_descriptors
        )
        return next_experiments
    def reset(self):
        """Reset the internal parameters"""
        # Dropping the stored (X, Y) history makes the next call to
        # suggest_experiments behave like a fresh optimization run.
        self.prev_param = None
def constr_wrapper(self, summit_domain):
v_input_names = [v.name for v in summit_domain.variables if not v.is_objective]
constraints = []
for c in summit_domain.constraints:
tmp_c = c.lhs
# Split LHS on + signs into fragments
tmp_p = str.split(tmp_c, "+")
tmp_a = []
for t in tmp_p:
# For each of the fragments, split on -
terms = str.split(t, "-")
for i in range(len(terms)):
if i == 0:
# If the first part in the fragment is not empty, that
# means the first term was positive.
if terms[0] != "":
tmp_a.append(terms[0])
# All of the terms in the split will have
# negative coefficients.
else:
tmp_a.append("-" + terms[i])
# Split the terms into coefficients and variables:
constraint_dict = dict()
for term in tmp_a:
for i, char in enumerate(term):
if char in string.ascii_letters:
index = i
c_variable = term[index:]
if term[:index] == "":
c_coeff = 1.0
elif term[:index] == "-":
c_coeff = -1.0
else:
c_coeff = float(term[:index])
break
else:
c_variable = "constant"
c_coeff = term
constraint_dict[c_variable] = c_coeff
# Place coefficients in the variable order the model expects.
constraints_ordered = []
for v_input_index, v_input_name in enumerate(v_input_names):
constraints_ordered.append(constraint_dict.get(v_input_name, 0))
constraints.append(
[constraints_ordered, constraint_dict["constant"], c.constraint_type]
)
return constraints
def to_dict(self):
if self.prev_param is not None:
param = [self.prev_param[0].tolist(), self.prev_param[1].tolist()]
else:
param = None
strategy_params = dict(
prev_param=param,
use_descriptors=self.use_descriptors,
estimator_type=self.estimator_type,
std_estimator_type=self.std_estimator_type,
acquisition_type=self.acquisition_type,
optimizer_type=self.optimizer_type,
generator_type=self.generator_type,
initial_points=self.initial_points,
min_child_samples=self.min_child_samples,
)
return super().to_dict(**strategy_params)
@classmethod
def from_dict(cls, d):
# Setup ENTMOOT
entmoot = super().from_dict(d)
param = d["strategy_params"]["prev_param"]
if param is not None:
param = [np.array(param[0]), np.array(param[1])]
entmoot.prev_param = param
return entmoot
"""
def categorical_wrapper(self, categories, reference_categories=None):
if not reference_categories:
return [i for i, _ in enumerate(categories)]
else:
return [reference_categories.index(c) for c in categories]
def categorical_unwrap(self, gpyopt_level, categories):
return categories[int(gpyopt_level)]
"""
| 38.939457 | 123 | 0.541122 | from summit.strategies.base import Strategy
from summit.domain import *
from summit.utils.dataset import DataSet
import string
import numpy as np
import pandas as pd
class ENTMOOT(Strategy):
def __init__(
self,
domain,
transform=None,
estimator_type=None,
std_estimator_type=None,
acquisition_type=None,
optimizer_type=None,
generator_type=None,
initial_points=50,
min_child_samples=5,
**kwargs
):
Strategy.__init__(self, domain, transform=transform, **kwargs)
self.use_descriptors = kwargs.get("use_descriptors", False)
self.input_domain = []
for v in self.domain.variables:
if not v.is_objective:
if isinstance(v, ContinuousVariable):
self.input_domain.append(
{
"name": v.name,
"type": v.variable_type,
"domain": (v.bounds[0], v.bounds[1]),
}
)
elif isinstance(v, CategoricalVariable):
raise ValueError(
"Categorical Variables are not yet implemented "
"for ENTMOOT strategy."
)
if not self.use_descriptors:
self.input_domain.append(
{
"name": v.name,
"type": "categorical",
"domain": tuple(self.categorical_wrapper(v.levels)),
}
)
elif v.ds is not None and self.use_descriptors:
if v.ds is None:
raise ValueError(
"No descriptors provided for variable: {}".format(
v.name
)
)
descriptor_names = v.ds.data_columns
descriptors = np.asarray(
[
v.ds.loc[:, [l]].values.tolist()
for l in v.ds.data_columns
]
)
for j, d in enumerate(descriptors):
self.input_domain.append(
{
"name": descriptor_names[j],
"type": "continuous",
"domain": (
np.min(np.asarray(d)),
np.max(np.asarray(d)),
),
}
)
elif v.ds is None and self.use_descriptors:
raise ValueError(
"Cannot use descriptors because none are provided."
)
else:
raise TypeError("Unknown variable type.")
if len(self.domain.constraints) != 0:
self.constraints = self.constr_wrapper(self.domain)
else:
self.constraints = None
self.input_dim = len(self.domain.input_variables)
if estimator_type in [
"GBRT",
"RF",
]:
self.estimator_type = estimator_type
else:
self.estimator_type = "GBRT"
if std_estimator_type in [
"BDD",
"L1BDD",
"DDP",
"L1DDP",
]:
self.std_estimator_type = std_estimator_type
else:
self.std_estimator_type = (
"BDD"
)
if acquisition_type in [
"LCB",
]:
self.acquisition_type = acquisition_type
else:
self.acquisition_type = (
"LCB"
)
if optimizer_type in ["sampling", "global"]:
self.optimizer_type = optimizer_type
else:
self.optimizer_type = "sampling"
if (self.optimizer_type == "sampling") & (self.constraints is not None):
raise ValueError(
"Constraints can only be applied when ENTMOOT is using"
"global solver. Set optimizer_type = global or remove"
"constraints."
)
import pkg_resources
required = {"gurobipy"}
installed = {pkg.key for pkg in pkg_resources.working_set}
self.gurobi_missing = required - installed
if generator_type in [
"random",
"sobol",
"halton",
"hammersly",
"lhs",
"grid",
]:
self.generator_type = generator_type
else:
self.generator_type = "random"
self.initial_points = initial_points
self.min_child_samples = min_child_samples
self.prev_param = None
def suggest_experiments(
self, num_experiments=1, prev_res: DataSet = None, **kwargs
):
from entmoot.optimizer.optimizer import Optimizer
from entmoot.space.space import Space
param = None
xbest = np.zeros(self.domain.num_continuous_dimensions())
obj = self.domain.output_variables[0]
objective_dir = -1.0 if obj.maximize else 1.0
fbest = float("inf")
bounds = [k["domain"] for k in self.input_domain]
space = Space(bounds)
if not self.gurobi_missing:
from gurobipy import LinExpr
from entmoot.optimizer.gurobi_utils import get_core_gurobi_model
core_model = get_core_gurobi_model(space)
gvars = core_model.getVars()
for c in self.constraints:
left = LinExpr()
left.addTerms(c[0], gvars)
left.addConstant(c[1])
core_model.addLConstr(left, c[2], 0)
core_model.update()
acq_optimizer_kwargs = {"add_model_core": core_model}
else:
acq_optimizer_kwargs = None
entmoot_model = Optimizer(
dimensions=bounds,
base_estimator=self.estimator_type,
std_estimator=self.std_estimator_type,
n_initial_points=self.initial_points,
initial_point_generator=self.generator_type,
acq_func=self.acquisition_type,
acq_optimizer=self.optimizer_type,
random_state=None,
acq_func_kwargs=None,
acq_optimizer_kwargs=acq_optimizer_kwargs,
base_estimator_kwargs={"min_child_samples": self.min_child_samples},
std_estimator_kwargs=None,
model_queue_size=None,
verbose=False,
)
if prev_res is not None:
inputs, outputs = self.transform.transform_inputs_outputs(
prev_res, transform_descriptors=self.use_descriptors
)
for v in self.domain.variables:
if v.is_objective and v.maximize:
outputs[v.name] = -1 * outputs[v.name]
if isinstance(v, CategoricalVariable):
if not self.use_descriptors:
inputs[v.name] = self.categorical_wrapper(
inputs[v.name], v.levels
)
inputs = inputs.to_numpy()
outputs = outputs.to_numpy()
if self.prev_param is not None:
X_step = self.prev_param[0]
Y_step = self.prev_param[1]
X_step = np.vstack((X_step, inputs))
Y_step = np.vstack((Y_step, outputs))
else:
X_step = inputs
Y_step = outputs
prev_X = [list(x) for x in X_step]
prev_y = [y for x in Y_step for y in x]
entmoot_model.tell(prev_X, prev_y, fit=True)
param = [X_step, Y_step]
fbest = np.min(Y_step)
xbest = X_step[np.argmin(Y_step)]
request = np.array(
entmoot_model.ask(n_points=num_experiments, strategy="cl_mean")
)
next_experiments = None
transform_descriptors = False
if request is not None and len(request) != 0:
next_experiments = {}
i_inp = 0
for v in self.domain.variables:
if not v.is_objective:
if isinstance(v, CategoricalVariable):
if v.ds is None or not self.use_descriptors:
cat_list = []
for j, entry in enumerate(request[:, i_inp]):
cat_list.append(
self.categorical_unwrap(entry, v.levels)
)
next_experiments[v.name] = np.asarray(cat_list)
i_inp += 1
else:
descriptor_names = v.ds.data_columns
for d in descriptor_names:
next_experiments[d] = request[:, i_inp]
i_inp += 1
transform_descriptors = True
else:
next_experiments[v.name] = request[:, i_inp]
i_inp += 1
next_experiments = DataSet.from_df(pd.DataFrame(data=next_experiments))
next_experiments[("strategy", "METADATA")] = "ENTMOOT"
self.fbest = objective_dir * fbest
self.xbest = xbest
self.prev_param = param
next_experiments = self.transform.un_transform(
next_experiments, transform_descriptors=self.use_descriptors
)
return next_experiments
def reset(self):
self.prev_param = None
def constr_wrapper(self, summit_domain):
v_input_names = [v.name for v in summit_domain.variables if not v.is_objective]
constraints = []
for c in summit_domain.constraints:
tmp_c = c.lhs
tmp_p = str.split(tmp_c, "+")
tmp_a = []
for t in tmp_p:
terms = str.split(t, "-")
for i in range(len(terms)):
if i == 0:
if terms[0] != "":
tmp_a.append(terms[0])
else:
tmp_a.append("-" + terms[i])
constraint_dict = dict()
for term in tmp_a:
for i, char in enumerate(term):
if char in string.ascii_letters:
index = i
c_variable = term[index:]
if term[:index] == "":
c_coeff = 1.0
elif term[:index] == "-":
c_coeff = -1.0
else:
c_coeff = float(term[:index])
break
else:
c_variable = "constant"
c_coeff = term
constraint_dict[c_variable] = c_coeff
constraints_ordered = []
for v_input_index, v_input_name in enumerate(v_input_names):
constraints_ordered.append(constraint_dict.get(v_input_name, 0))
constraints.append(
[constraints_ordered, constraint_dict["constant"], c.constraint_type]
)
return constraints
def to_dict(self):
if self.prev_param is not None:
param = [self.prev_param[0].tolist(), self.prev_param[1].tolist()]
else:
param = None
strategy_params = dict(
prev_param=param,
use_descriptors=self.use_descriptors,
estimator_type=self.estimator_type,
std_estimator_type=self.std_estimator_type,
acquisition_type=self.acquisition_type,
optimizer_type=self.optimizer_type,
generator_type=self.generator_type,
initial_points=self.initial_points,
min_child_samples=self.min_child_samples,
)
return super().to_dict(**strategy_params)
@classmethod
def from_dict(cls, d):
entmoot = super().from_dict(d)
param = d["strategy_params"]["prev_param"]
if param is not None:
param = [np.array(param[0]), np.array(param[1])]
entmoot.prev_param = param
return entmoot
| true | true |
1c39b292b26a8cddc4fc1986b65f20b825c12457 | 3,858 | py | Python | tests/unit/benchmark/scenarios/networking/test_netperf_node.py | kkltcjk/1026 | c9926432e43602a5c00ee6134b2a8dec5f4a7e96 | [
"Apache-2.0"
] | null | null | null | tests/unit/benchmark/scenarios/networking/test_netperf_node.py | kkltcjk/1026 | c9926432e43602a5c00ee6134b2a8dec5f4a7e96 | [
"Apache-2.0"
] | null | null | null | tests/unit/benchmark/scenarios/networking/test_netperf_node.py | kkltcjk/1026 | c9926432e43602a5c00ee6134b2a8dec5f4a7e96 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for
# yardstick.benchmark.scenarios.networking.netperf_node.NetperfNode
import mock
import unittest
import os
import json
from yardstick.benchmark.scenarios.networking import netperf_node
@mock.patch('yardstick.benchmark.scenarios.networking.netperf_node.ssh')
class NetperfNodeTestCase(unittest.TestCase):
    """Unit tests for NetperfNode; all SSH interaction is mocked out."""
    def setUp(self):
        """Build a minimal scenario context (client host + netserver target)."""
        self.ctx = {
            'host': {
                'ip': '192.168.10.10',
                'user': 'root',
                'password': 'root'
            },
            'target': {
                'ip': '192.168.10.11',
                'user': 'root',
                'password': 'root'
            }
        }
    def test_netperf_node_successful_setup(self, mock_ssh):
        """setup() creates both SSH connections and marks setup as done."""
        p = netperf_node.NetperfNode({}, self.ctx)
        mock_ssh.SSH().execute.return_value = (0, '', '')
        p.setup()
        self.assertIsNotNone(p.server)
        self.assertIsNotNone(p.client)
        self.assertEqual(p.setup_done, True)
    def test_netperf_node_successful_no_sla(self, mock_ssh):
        """run() without an SLA fills `result` with the parsed JSON output."""
        options = {}
        args = {'options': options}
        result = {}
        p = netperf_node.NetperfNode(args, self.ctx)
        mock_ssh.SSH().execute.return_value = (0, '', '')
        p.host = mock_ssh.SSH()
        sample_output = self._read_sample_output()
        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
        expected_result = json.loads(sample_output)
        p.run(result)
        self.assertEqual(result, expected_result)
    def test_netperf_node_successful_sla(self, mock_ssh):
        """run() succeeds when measured latency is within the SLA bound."""
        options = {}
        args = {
            'options': options,
            'sla': {'mean_latency': 100}
        }
        result = {}
        p = netperf_node.NetperfNode(args, self.ctx)
        mock_ssh.SSH().execute.return_value = (0, '', '')
        p.host = mock_ssh.SSH()
        sample_output = self._read_sample_output()
        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
        expected_result = json.loads(sample_output)
        p.run(result)
        self.assertEqual(result, expected_result)
    def test_netperf_node_unsuccessful_sla(self, mock_ssh):
        """run() raises AssertionError when the SLA latency bound is exceeded."""
        options = {}
        args = {
            'options': options,
            'sla': {'mean_latency': 5}
        }
        result = {}
        p = netperf_node.NetperfNode(args, self.ctx)
        mock_ssh.SSH().execute.return_value = (0, '', '')
        p.host = mock_ssh.SSH()
        sample_output = self._read_sample_output()
        mock_ssh.SSH().execute.return_value = (0, sample_output, '')
        self.assertRaises(AssertionError, p.run, result)
    def test_netperf_node_unsuccessful_script_error(self, mock_ssh):
        """run() raises RuntimeError when the remote script exits non-zero."""
        options = {}
        args = {'options': options}
        result = {}
        p = netperf_node.NetperfNode(args, self.ctx)
        mock_ssh.SSH().execute.return_value = (0, '', '')
        p.host = mock_ssh.SSH()
        mock_ssh.SSH().execute.return_value = (1, '', 'FOOBAR')
        self.assertRaises(RuntimeError, p.run, result)
    def _read_sample_output(self):
        """Load the canned netperf JSON output stored next to this test file."""
        curr_path = os.path.dirname(os.path.abspath(__file__))
        output = os.path.join(curr_path, 'netperf_sample_output.json')
        with open(output) as f:
            sample_output = f.read()
        return sample_output
def main():
    """Entry point: run this module's test cases via the unittest runner."""
    unittest.main()
if __name__ == '__main__':
main()
| 30.140625 | 78 | 0.581389 | true | true | |
1c39b414b0fdb99c1db936586f63ef8042de5f94 | 3,140 | py | Python | Django Server/smart_pot/settings.py | 5Volts/Smart-Pot | 0e1e23c65e40d02ea563c686f9d53ecbd991d710 | [
"MIT"
] | 2 | 2019-03-18T02:53:19.000Z | 2019-05-01T06:47:07.000Z | Django Server/smart_pot/settings.py | 5Volts/Smart-Pot | 0e1e23c65e40d02ea563c686f9d53ecbd991d710 | [
"MIT"
] | null | null | null | Django Server/smart_pot/settings.py | 5Volts/Smart-Pot | 0e1e23c65e40d02ea563c686f9d53ecbd991d710 | [
"MIT"
] | 1 | 2020-01-17T20:20:29.000Z | 2020-01-17T20:20:29.000Z | """
Django settings for smart_pot project.
Generated by 'django-admin startproject' using Django 2.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#b*$k($wsp$tw0uf(mx%$c@xj-0r8i!ydjo@pd(s)z861cst&h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['10.0.0.178','tanrunen.hsd1.ca.comcast.net']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smart_pot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'smart_pot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.950413 | 91 | 0.69586 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '#b*$k($wsp$tw0uf(mx%$c@xj-0r8i!ydjo@pd(s)z861cst&h'
DEBUG = True
ALLOWED_HOSTS = ['10.0.0.178','tanrunen.hsd1.ca.comcast.net']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smart_pot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'smart_pot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| true | true |
1c39b4926af1f3f3265ab747f398e37edd4490d0 | 11,065 | py | Python | smartmon.py | chr4/node-exporter-textfile-collector-scripts | 23b0aa0d5b0db999795c752685b8b38d7a61e91a | [
"Apache-2.0"
] | 248 | 2019-08-05T11:31:01.000Z | 2022-03-30T05:59:53.000Z | smartmon.py | chr4/node-exporter-textfile-collector-scripts | 23b0aa0d5b0db999795c752685b8b38d7a61e91a | [
"Apache-2.0"
] | 72 | 2019-08-07T09:16:12.000Z | 2022-01-24T18:54:46.000Z | smartmon.py | chr4/node-exporter-textfile-collector-scripts | 23b0aa0d5b0db999795c752685b8b38d7a61e91a | [
"Apache-2.0"
] | 124 | 2019-08-04T09:45:55.000Z | 2022-03-17T00:40:13.000Z | #!/usr/bin/env python3
import argparse
import collections
import csv
import datetime
import decimal
import re
import shlex
import subprocess
import sys
# Parses "Key: value" / "Key is value" lines from `smartctl --info` output.
device_info_re = re.compile(r'^(?P<k>[^:]+?)(?:(?:\sis|):)\s*(?P<v>.*)$')
# Matches entries of the extended ATA error log (`smartctl -l xerror`); the
# first capture group is the running error number.
ata_error_count_re = re.compile(
    r'^Error (\d+) \[\d+\] occurred', re.MULTILINE)
# Matches the overall health verdict line emitted by `smartctl --health`.
self_test_re = re.compile(r'^SMART.*(PASSED|OK)$', re.MULTILINE)
# Maps `smartctl --info` keys to the label names used on the device_info metric.
device_info_map = {
    'Vendor': 'vendor',
    'Product': 'product',
    'Revision': 'revision',
    'Logical Unit id': 'lun_id',
    'Model Family': 'model_family',
    'Device Model': 'device_model',
    'Serial Number': 'serial_number',
    'Firmware Version': 'firmware_version',
}
# Only these (lower-cased) SMART attribute names are exported as metrics.
smart_attributes_whitelist = {
    'airflow_temperature_cel',
    'command_timeout',
    'current_pending_sector',
    'end_to_end_error',
    'erase_fail_count_total',
    'g_sense_error_rate',
    'hardware_ecc_recovered',
    'host_reads_mib',
    'host_reads_32mib',
    'host_writes_mib',
    'host_writes_32mib',
    'load_cycle_count',
    'media_wearout_indicator',
    'wear_leveling_count',
    'nand_writes_1gib',
    'offline_uncorrectable',
    'power_cycle_count',
    'power_on_hours',
    'program_fail_count',
    'raw_read_error_rate',
    'reallocated_event_count',
    'reallocated_sector_ct',
    'reported_uncorrect',
    'sata_downshift_count',
    'seek_error_rate',
    'spin_retry_count',
    'spin_up_time',
    'start_stop_count',
    'temperature_case',
    'temperature_celsius',
    'temperature_internal',
    'total_lbas_read',
    'total_lbas_written',
    'udma_crc_error_count',
    'unsafe_shutdown_count',
    'workld_host_reads_perc',
    'workld_media_wear_indic',
    'workload_minutes',
}
# One Prometheus sample: metric name, label dict and value.
Metric = collections.namedtuple('Metric', 'name labels value')
# One row of the `smartctl --attributes` table; `raw_value` is the DictReader
# restkey, so it absorbs all trailing columns as a list of strings.
SmartAttribute = collections.namedtuple('SmartAttribute', [
    'id', 'name', 'flag', 'value', 'worst', 'threshold', 'type', 'updated',
    'when_failed', 'raw_value',
])
class Device(collections.namedtuple('DeviceBase', 'path opts')):
    """A single device reported by ``smartctl --scan-open``.

    ``path`` is the device node (e.g. ``/dev/sda``); ``opts`` holds the
    parsed smartctl options, of which only the ``--device`` type is used.
    """
    @property
    def type(self):
        """The smartctl device type, e.g. ``sat`` or ``sat+megaraid,0``."""
        return self.opts.type
    @property
    def base_labels(self):
        """Labels shared by every metric emitted for this device."""
        subdevice = self.type.partition('+')[2]
        return {'device': self.path, 'disk': subdevice if subdevice else '0'}
    def smartctl_select(self):
        """Command-line arguments that select this device for smartctl."""
        return ['--device', self.type, self.path]
def metric_key(metric, prefix=''):
    """Return the exported metric name: *prefix* prepended to metric.name."""
    return '%s%s' % (prefix, metric.name)
def metric_format(metric, prefix=''):
    """Render *metric* as one line in Prometheus exposition format.

    Label values have embedded double quotes escaped; the value is parsed
    through Decimal so it serializes as a plain number.
    """
    escaped_labels = []
    for label_name, label_value in metric.labels.items():
        escaped_labels.append(
            '%s="%s"' % (label_name, label_value.replace('"', '\\"')))
    return '%s%s{%s} %s' % (
        prefix, metric.name, ','.join(escaped_labels),
        decimal.Decimal(metric.value))
def metric_print_meta(metric, prefix=''):
    """Print the ``# HELP`` and ``# TYPE`` header lines for *metric*."""
    key = prefix + metric.name
    print('# HELP {0} SMART metric {1}'.format(key, metric.name))
    print('# TYPE {0} gauge'.format(key))
def metric_print(metric, prefix=''):
    """Print *metric* as a single exposition-format sample line."""
    print(metric_format(metric, prefix))
def smart_ctl(*args, check=True):
    """Run the ``smartctl`` binary with *args* and return its stdout as text.

    Args:
        check: If True, raise CalledProcessError on a non-zero exit status.
    Returns:
        (str) Decoded standard output of the smartctl subprocess.
    """
    completed = subprocess.run(
        ['smartctl', *args], stdout=subprocess.PIPE, check=check)
    return completed.stdout.decode('utf-8')
def smart_ctl_version():
    """Return the smartctl version (second token of the first `-V` line)."""
    banner = smart_ctl('-V')
    first_line = banner.split('\n')[0]
    return first_line.split()[1]
def find_devices():
    """Yield a Device for every entry reported by ``smartctl --scan-open``.

    Each scan line looks like ``/dev/sda -d sat # comment``: the first token
    is the device path and the remaining tokens are smartctl options, of
    which only ``-d``/``--device`` is parsed.
    """
    opt_parser = argparse.ArgumentParser()
    opt_parser.add_argument('-d', '--device', dest='type')
    for line in smart_ctl('--scan-open').split('\n'):
        line = line.strip()
        if not line:
            continue
        tokens = shlex.split(line, comments=True)
        if tokens:
            yield Device(tokens[0], opt_parser.parse_args(tokens[1:]))
def device_is_active(device):
    """Return True if *device* is active, False if it is in standby.

    ``smartctl --nocheck standby`` exits non-zero when the disk is in a
    low-power state, which surfaces here as CalledProcessError.
    """
    try:
        smart_ctl('--nocheck', 'standby', *device.smartctl_select())
        return True
    except subprocess.CalledProcessError:
        return False
def device_info(device):
    """Yield (key, value) pairs of basic model information for *device*.

    Parses ``smartctl --info`` output, skipping the three banner lines and
    keeping only lines that match the ``Key: value`` pattern.
    """
    raw = smart_ctl('--info', *device.smartctl_select())
    for line in raw.strip().split('\n')[3:]:
        match = device_info_re.match(line)
        if match is not None:
            yield match.groups()
def device_smart_capabilities(device):
    """Return ``(smart_available, smart_enabled)`` booleans for *device*.

    smartctl emits one "SMART support is: ..." line per capability; only
    the first word of each value ("Available" / "Enabled") is significant.
    """
    states = set()
    for key, value in device_info(device):
        if key == 'SMART support':
            states.add(value.split(' ', 1)[0])
    return 'Available' in states, 'Enabled' in states
def collect_device_info(device):
    """Yield a single ``device_info`` metric carrying model metadata labels."""
    values = dict(device_info(device))
    labels = dict(device.base_labels)
    for source_key, label_name in device_info_map.items():
        if source_key in values:
            labels[label_name] = values[source_key]
    yield Metric('device_info', labels, True)
def collect_device_health_self_assessment(device):
    """Yield ``device_smart_healthy``: 1 if the SMART self-assessment passed.

    ``check=False`` because smartctl exits non-zero for failing drives and
    that is exactly the case we want to report, not crash on.
    """
    output = smart_ctl('--health', *device.smartctl_select(), check=False)
    passed = self_test_re.search(output) is not None
    yield Metric('device_smart_healthy', device.base_labels, passed)
def collect_ata_metrics(device):
    """Yield attr_{value,worst,threshold,raw_value} metrics for every
    whitelisted SMART attribute of *device*.

    Args:
        device: (Device) Device in question.
    Yields:
        (Metric) One metric per attribute table column.
    """
    # Fetch SMART attributes for the given device.
    attributes = smart_ctl(
        '--attributes', *device.smartctl_select()
    )
    # replace multiple occurrences of whitespace with a single whitespace
    # so that the CSV Parser recognizes individual columns properly.
    attributes = re.sub(r'[\t\x20]+', ' ', attributes)
    # Turn smartctl output into a list of lines and skip to the table of
    # SMART attributes.
    attribute_lines = attributes.strip().split('\n')[7:]
    # Some attributes have multiple IDs but have the same name. Don't
    # yield attributes that already have been reported before.
    seen = set()
    reader = csv.DictReader(
        (line.strip() for line in attribute_lines),
        fieldnames=SmartAttribute._fields[:-1],
        restkey=SmartAttribute._fields[-1], delimiter=' ')
    for entry in reader:
        # We're only interested in the SMART attributes that are
        # whitelisted here.
        entry['name'] = entry['name'].lower()
        if entry['name'] not in smart_attributes_whitelist:
            continue
        # Ensure that only the numeric parts are fetched from the raw_value.
        # Attributes such as 194 Temperature_Celsius reported by my SSD
        # are in the format of "36 (Min/Max 24/40)" which can't be expressed
        # properly as a prometheus metric.
        m = re.match(r'^(\d+)', ' '.join(entry['raw_value']))
        if not m:
            continue
        entry['raw_value'] = m.group(1)
        # Some device models report "---" in the threshold value where most
        # devices would report "000". We do the substitution here because
        # downstream code expects values to be convertable to integer.
        if entry['threshold'] == '---':
            entry['threshold'] = '0'
        if entry['name'] in smart_attributes_whitelist and entry['name'] not in seen:
            labels = {
                'name': entry['name'],
                **device.base_labels,
            }
            # Emit one gauge per table column of this attribute row.
            for col in 'value', 'worst', 'threshold', 'raw_value':
                yield Metric(
                    'attr_{col}'.format(col=col),
                    labels, entry[col])
            seen.add(entry['name'])
def collect_ata_error_count(device):
    """Inspect the device error log and report the amount of entries.

    Args:
        device: (Device) Device in question.

    Yields:
        (Metric) Device error count.
    """
    # check=False: a non-zero smartctl exit status must not raise here.
    error_log = smart_ctl(
        '-l', 'xerror,1', *device.smartctl_select(), check=False)
    m = ata_error_count_re.search(error_log)
    # Normalize to int so the metric value type matches the no-error case
    # (previously a str was yielded when errors were present).
    error_count = int(m.group(1)) if m is not None else 0
    yield Metric('device_errors', device.base_labels, error_count)
def collect_disks_smart_metrics(wakeup_disks):
    """Collect metrics for every device reported by `smartctl --scan-open`.

    Args:
        wakeup_disks: (bool) Also collect from disks that are in stand-by.

    Yields:
        (Metric) Per-device run/activity/capability/health/attribute metrics.
    """
    # Use an aware UTC datetime: naive utcnow().timestamp() is interpreted
    # as *local* time and yields a wrong epoch on hosts not running in UTC.
    now = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
    for device in find_devices():
        yield Metric('smartctl_run', device.base_labels, now)
        is_active = device_is_active(device)
        yield Metric('device_active', device.base_labels, is_active)
        # Skip further metrics collection to prevent the disk from
        # spinning up.
        if not is_active and not wakeup_disks:
            continue
        yield from collect_device_info(device)
        smart_available, smart_enabled = device_smart_capabilities(device)
        yield Metric(
            'device_smart_available', device.base_labels, smart_available)
        yield Metric(
            'device_smart_enabled', device.base_labels, smart_enabled)
        # Skip further metrics collection here if SMART is unavailable
        # on the device. Further smartctl invocations would fail
        # anyways.
        if not smart_available:
            continue
        yield from collect_device_health_self_assessment(device)
        if device.type.startswith('sat'):
            yield from collect_ata_metrics(device)
            yield from collect_ata_error_count(device)
def main():
    """Entry point: print the smartctl version metric, then all disk metrics."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-s', '--wakeup-disks', dest='wakeup_disks', action='store_true')
    options = arg_parser.parse_args(sys.argv[1:])

    version_metric = Metric(
        'smartctl_version', {'version': smart_ctl_version()}, True)
    metric_print_meta(version_metric, 'smartmon_')
    metric_print(version_metric, 'smartmon_')

    collected = sorted(
        collect_disks_smart_metrics(options.wakeup_disks),
        key=lambda metric: metric.name)
    last_name = None
    for metric in collected:
        # Emit the HELP/TYPE header lines once per metric name.
        if metric.name != last_name:
            metric_print_meta(metric, 'smartmon_')
            last_name = metric.name
        metric_print(metric, 'smartmon_')
# Run the exporter when executed as a script.
if __name__ == '__main__':
    main()
| 28.227041 | 91 | 0.642657 |
import argparse
import collections
import csv
import datetime
import decimal
import re
import shlex
import subprocess
import sys
# Parses "Key: value" / "Key is value" lines emitted by `smartctl --info`.
device_info_re = re.compile(r'^(?P<k>[^:]+?)(?:(?:\sis|):)\s*(?P<v>.*)$')
# Matches numbered entries of the extended ATA error log (`smartctl -l xerror`).
ata_error_count_re = re.compile(
    r'^Error (\d+) \[\d+\] occurred', re.MULTILINE)
# Matches the overall health self-assessment result line (PASSED / OK).
self_test_re = re.compile(r'^SMART.*(PASSED|OK)$', re.MULTILINE)
# Maps `smartctl --info` field names to the metric label names they feed.
device_info_map = {
    'Vendor': 'vendor',
    'Product': 'product',
    'Revision': 'revision',
    'Logical Unit id': 'lun_id',
    'Model Family': 'model_family',
    'Device Model': 'device_model',
    'Serial Number': 'serial_number',
    'Firmware Version': 'firmware_version',
}
# SMART attribute names (lower-cased) that are exported as metrics.
smart_attributes_whitelist = {
    'airflow_temperature_cel',
    'command_timeout',
    'current_pending_sector',
    'end_to_end_error',
    'erase_fail_count_total',
    'g_sense_error_rate',
    'hardware_ecc_recovered',
    'host_reads_mib',
    'host_reads_32mib',
    'host_writes_mib',
    'host_writes_32mib',
    'load_cycle_count',
    'media_wearout_indicator',
    'wear_leveling_count',
    'nand_writes_1gib',
    'offline_uncorrectable',
    'power_cycle_count',
    'power_on_hours',
    'program_fail_count',
    'raw_read_error_rate',
    'reallocated_event_count',
    'reallocated_sector_ct',
    'reported_uncorrect',
    'sata_downshift_count',
    'seek_error_rate',
    'spin_retry_count',
    'spin_up_time',
    'start_stop_count',
    'temperature_case',
    'temperature_celsius',
    'temperature_internal',
    'total_lbas_read',
    'total_lbas_written',
    'udma_crc_error_count',
    'unsafe_shutdown_count',
    'workld_host_reads_perc',
    'workld_media_wear_indic',
    'workload_minutes',
}
# A single exported sample: metric name, label dict, and value.
Metric = collections.namedtuple('Metric', 'name labels value')
# Columns of the `smartctl --attributes` table; the trailing raw_value
# column collects any extra whitespace-separated tokens (csv restkey).
SmartAttribute = collections.namedtuple('SmartAttribute', [
    'id', 'name', 'flag', 'value', 'worst', 'threshold', 'type', 'updated',
    'when_failed', 'raw_value',
])
class Device(collections.namedtuple('DeviceBase', 'path opts')):
    """A device found by `smartctl --scan-open`: its path plus parsed options."""

    @property
    def type(self):
        """The smartctl device type given via the -d/--device option."""
        return self.opts.type

    @property
    def base_labels(self):
        """Labels attached to every metric of this device."""
        disk_index = self.type.partition('+')[2]
        if not disk_index:
            disk_index = '0'
        return {'device': self.path, 'disk': disk_index}

    def smartctl_select(self):
        """Command-line arguments that select this device for smartctl."""
        return ['--device', self.type, self.path]
def metric_key(metric, prefix=''):
    """Return the full exposition name: *prefix* followed by the metric name."""
    return '{}{}'.format(prefix, metric.name)
def metric_format(metric, prefix=''):
    """Render *metric* as a single Prometheus exposition line."""
    label_pairs = [
        '{k}="{v}"'.format(k=k, v=v.replace('"', '\\"'))
        for k, v in metric.labels.items()
    ]
    return '{key}{{{labels}}} {value}'.format(
        key=metric_key(metric, prefix),
        labels=','.join(label_pairs),
        value=decimal.Decimal(metric.value))
def metric_print_meta(metric, prefix=''):
    """Print the Prometheus # HELP and # TYPE header lines for *metric*."""
    full_name = metric_key(metric, prefix)
    print('# HELP {key} SMART metric {metric.name}'.format(
        key=full_name, metric=metric))
    print('# TYPE {key} gauge'.format(key=full_name))
def metric_print(metric, prefix=''):
    """Print the exposition line for *metric*."""
    line = metric_format(metric, prefix)
    print(line)
def smart_ctl(*args, check=True):
    """Run smartctl with *args* and return its stdout decoded as UTF-8.

    With check=True (default) a non-zero exit raises CalledProcessError.
    """
    completed = subprocess.run(
        ['smartctl', *args], stdout=subprocess.PIPE, check=check)
    return completed.stdout.decode('utf-8')
def smart_ctl_version():
    """Return the smartctl version (second token of the first `-V` line)."""
    first_line = smart_ctl('-V').split('\n')[0]
    return first_line.split()[1]
def find_devices():
    """Yield a Device for each line of `smartctl --scan-open` output."""
    opt_parser = argparse.ArgumentParser()
    opt_parser.add_argument('-d', '--device', dest='type')
    for line in smart_ctl('--scan-open').split('\n'):
        line = line.strip()
        if not line:
            continue
        # shlex with comments=True drops the trailing "# ..." annotation.
        tokens = shlex.split(line, comments=True)
        if not tokens:
            continue
        yield Device(tokens[0], opt_parser.parse_args(tokens[1:]))
def device_is_active(device):
    """Return False when `smartctl --nocheck standby` reports the disk asleep."""
    try:
        smart_ctl('--nocheck', 'standby', *device.smartctl_select())
        return True
    except subprocess.CalledProcessError:
        return False
def device_info(device):
    """Return an iterator of (key, value) pairs from `smartctl --info` output."""
    # Drop the three-line smartctl banner before the info fields.
    info_lines = smart_ctl(
        '--info', *device.smartctl_select()
    ).strip().split('\n')[3:]
    parsed = (device_info_re.match(line) for line in info_lines)
    return (match.groups() for match in parsed if match is not None)
def device_smart_capabilities(device):
    """Return the (smart_available, smart_enabled) booleans for *device*."""
    support_states = {
        value.split(' ', 1)[0]
        for key, value in device_info(device)
        if key == 'SMART support'
    }
    return 'Available' in support_states, 'Enabled' in support_states
def collect_device_info(device):
    """Yield a device_info metric carrying the device's identifying labels."""
    values = dict(device_info(device))
    labels = dict(device.base_labels)
    for source_key, label_name in device_info_map.items():
        if source_key in values:
            labels[label_name] = values[source_key]
    yield Metric('device_info', labels, True)
def collect_device_health_self_assessment(device):
    """Yield whether the device's overall SMART self-assessment passed."""
    health_output = smart_ctl('--health', *device.smartctl_select(), check=False)
    passed = self_test_re.search(health_output) is not None
    yield Metric('device_smart_healthy', device.base_labels, passed)
def collect_ata_metrics(device):
    """Yield SMART attribute metrics for an ATA device.

    Args:
        device: (Device) Device in question.

    Yields:
        (Metric) One metric per whitelisted attribute and column
            (value, worst, threshold, raw_value).
    """
    attributes = smart_ctl(
        '--attributes', *device.smartctl_select()
    )
    # Collapse runs of whitespace to single spaces so the CSV parser
    # recognizes individual columns properly.
    attributes = re.sub(r'[\t\x20]+', ' ', attributes)
    # Skip the 7-line smartctl preamble to reach the attribute table.
    attribute_lines = attributes.strip().split('\n')[7:]
    # Some attributes have multiple IDs but share a name; only
    # yield attributes that have not been reported before.
    seen = set()
    reader = csv.DictReader(
        (line.strip() for line in attribute_lines),
        fieldnames=SmartAttribute._fields[:-1],
        restkey=SmartAttribute._fields[-1], delimiter=' ')
    for entry in reader:
        # We're only interested in the SMART attributes that are
        # whitelisted here.
        entry['name'] = entry['name'].lower()
        if entry['name'] not in smart_attributes_whitelist:
            continue
        # Keep only the leading numeric part of raw_value; values such as
        # "36 (Min/Max 24/40)" can't be expressed as a prometheus metric.
        m = re.match(r'^(\d+)', ' '.join(entry['raw_value']))
        if not m:
            continue
        entry['raw_value'] = m.group(1)
        # Some device models report "---" in the threshold value where most
        # devices would report "000". We do the substitution here because
        # downstream code expects values to be convertable to integer.
        if entry['threshold'] == '---':
            entry['threshold'] = '0'
        # Whitelist membership was already established above, so only the
        # de-duplication check is needed here.
        if entry['name'] not in seen:
            labels = {
                'name': entry['name'],
                **device.base_labels,
            }
            for col in 'value', 'worst', 'threshold', 'raw_value':
                yield Metric(
                    'attr_{col}'.format(col=col),
                    labels, entry[col])
            seen.add(entry['name'])
def collect_ata_error_count(device):
    """Inspect the device error log and report the amount of entries.

    Args:
        device: (Device) Device in question.

    Yields:
        (Metric) Device error count.
    """
    # check=False: a non-zero smartctl exit status must not raise here.
    error_log = smart_ctl(
        '-l', 'xerror,1', *device.smartctl_select(), check=False)
    m = ata_error_count_re.search(error_log)
    # Normalize to int so the metric value type matches the no-error case
    # (previously a str was yielded when errors were present).
    error_count = int(m.group(1)) if m is not None else 0
    yield Metric('device_errors', device.base_labels, error_count)
def collect_disks_smart_metrics(wakeup_disks):
    """Collect metrics for every device reported by `smartctl --scan-open`.

    Args:
        wakeup_disks: (bool) Also collect from disks that are in stand-by.

    Yields:
        (Metric) Per-device run/activity/capability/health/attribute metrics.
    """
    # Use an aware UTC datetime: naive utcnow().timestamp() is interpreted
    # as *local* time and yields a wrong epoch on hosts not running in UTC.
    now = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
    for device in find_devices():
        yield Metric('smartctl_run', device.base_labels, now)
        is_active = device_is_active(device)
        yield Metric('device_active', device.base_labels, is_active)
        # Skip further metrics collection to prevent the disk from
        # spinning up.
        if not is_active and not wakeup_disks:
            continue
        yield from collect_device_info(device)
        smart_available, smart_enabled = device_smart_capabilities(device)
        yield Metric(
            'device_smart_available', device.base_labels, smart_available)
        yield Metric(
            'device_smart_enabled', device.base_labels, smart_enabled)
        # Skip further metrics collection here if SMART is unavailable
        # on the device. Further smartctl invocations would fail
        # anyways.
        if not smart_available:
            continue
        yield from collect_device_health_self_assessment(device)
        if device.type.startswith('sat'):
            yield from collect_ata_metrics(device)
            yield from collect_ata_error_count(device)
def main():
    """Entry point: print the smartctl version metric, then all disk metrics."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '-s', '--wakeup-disks', dest='wakeup_disks', action='store_true')
    options = arg_parser.parse_args(sys.argv[1:])

    version_metric = Metric(
        'smartctl_version', {'version': smart_ctl_version()}, True)
    metric_print_meta(version_metric, 'smartmon_')
    metric_print(version_metric, 'smartmon_')

    collected = sorted(
        collect_disks_smart_metrics(options.wakeup_disks),
        key=lambda metric: metric.name)
    last_name = None
    for metric in collected:
        # Emit the HELP/TYPE header lines once per metric name.
        if metric.name != last_name:
            metric_print_meta(metric, 'smartmon_')
            last_name = metric.name
        metric_print(metric, 'smartmon_')
# Run the exporter when executed as a script.
if __name__ == '__main__':
    main()
| true | true |
1c39b62f6f2c649c29dfa51dc505c3af2898db80 | 56,838 | py | Python | src/azure-cli/azure/cli/command_modules/acs/_help.py | chunyu3/azure-cli | 481df7ec3f42067bdf078692cb32e9a27baa6821 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/_help.py | chunyu3/azure-cli | 481df7ec3f42067bdf078692cb32e9a27baa6821 | [
"MIT"
] | 1 | 2021-02-25T19:22:13.000Z | 2021-02-25T19:22:13.000Z | src/azure-cli/azure/cli/command_modules/acs/_help.py | chunyu3/azure-cli | 481df7ec3f42067bdf078692cb32e9a27baa6821 | [
"MIT"
] | 1 | 2021-08-10T02:08:51.000Z | 2021-08-10T02:08:51.000Z | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=import-error
# pylint: disable=line-too-long, too-many-lines
helps['acs'] = """
type: group
short-summary: Manage Azure Container Services.
long-summary: |
ACS will be retired as a standalone service on January 31, 2020.
If you use the Kubernetes orchestrator, please migrate to AKS by January 31, 2020.
"""
helps['acs browse'] = """
type: command
short-summary: Show the dashboard for a service container's orchestrator in a web browser.
examples:
- name: Show the dashboard for a service container's orchestrator in a web browser. (autogenerated)
text: az acs browse --name MyContainerService --resource-group MyResourceGroup
crafted: true
"""
helps['acs create'] = """
type: command
short-summary: Create a new container service.
parameters:
- name: --service-principal
type: string
short-summary: Service principal used for authentication to Azure APIs.
long-summary: If not specified, a new service principal with the contributor role is created and cached at $HOME/.azure/acsServicePrincipal.json to be used by subsequent `az acs` commands.
- name: --client-secret
type: string
short-summary: Secret associated with the service principal. This argument is required if `--service-principal` is specified.
- name: --agent-count
short-summary: Set the default number of agents for the agent pools.
long-summary: Note that DC/OS clusters will have 1 or 2 additional public agents.
examples:
- name: Create a DCOS cluster with an existing SSH key.
text: |-
az acs create --orchestrator-type DCOS -g MyResourceGroup -n MyContainerService \\
--ssh-key-value /path/to/publickey
- name: Create a DCOS cluster with two agent pools.
text: |-
az acs create -g MyResourceGroup -n MyContainerService --agent-profiles '[ \\
{ \\
"name": "agentpool1" \\
}, \\
{ \\
"name": "agentpool2" \\
}]'
- name: Create a DCOS cluster where the second agent pool has a vmSize specified.
text: |-
az acs create -g MyResourceGroup -n MyContainerService --agent-profiles '[ \\
{ \\
"name": "agentpool1" \\
}, \\
{ \\
"name": "agentpool2", \\
"vmSize": "Standard_D2" \\
}]'
- name: Create a DCOS cluster with agent-profiles specified from a file.
text: az acs create -g MyResourceGroup -n MyContainerService --agent-profiles MyAgentProfiles.json
"""
helps['acs dcos'] = """
type: group
short-summary: Commands to manage a DC/OS-orchestrated Azure Container Service.
"""
helps['acs dcos install-cli'] = """
type: command
short-summary: Download and install the DC/OS command-line tool for a cluster.
"""
helps['acs delete'] = """
type: command
short-summary: Delete a container service.
examples:
- name: Delete a container service. (autogenerated)
text: az acs delete --name MyContainerService --resource-group MyResourceGroup
crafted: true
"""
helps['acs kubernetes'] = """
type: group
short-summary: Commands to manage a Kubernetes-orchestrated Azure Container Service.
"""
helps['acs kubernetes get-credentials'] = """
type: command
short-summary: Download and install credentials to access a cluster. This command requires the same private-key used to create the cluster.
parameters:
- name: --output -o
type: string
long-summary: Credentials are always in YAML format, so this argument is effectively ignored.
examples:
- name: Download and install credentials to access a cluster. This command requires the same private-key used to create the cluster. (autogenerated)
text: az acs kubernetes get-credentials --name MyContainerService --resource-group MyResourceGroup
crafted: true
"""
helps['acs kubernetes install-cli'] = """
type: command
short-summary: Download and install the Kubernetes command-line tool for a cluster.
"""
helps['acs list'] = """
type: command
short-summary: List container services.
"""
helps['acs list-locations'] = """
type: command
short-summary: List locations where Azure Container Service is in preview and in production.
"""
helps['acs scale'] = """
type: command
short-summary: Change the private agent count of a container service.
parameters:
- name: --new-agent-count
type: int
short-summary: The number of agents for the container service.
examples:
- name: Change the private agent count of a container service. (autogenerated)
text: az acs scale --name MyContainerService --new-agent-count 10 --resource-group MyResourceGroup
crafted: true
"""
helps['acs show'] = """
type: command
short-summary: Show the details for a container service.
examples:
- name: Show the details for a container service. (autogenerated)
text: az acs show --name MyContainerService --resource-group MyResourceGroup
crafted: true
"""
helps['acs wait'] = """
type: command
short-summary: Wait for a container service to reach a desired state.
long-summary: If an operation on a container service was interrupted or was started with `--no-wait`, use this command to wait for it to complete.
"""
helps['aks'] = """
type: group
short-summary: Manage Azure Kubernetes Services.
"""
helps["aks check-acr"] = """
type: command
short-summary: Validate an ACR is accessible from an AKS cluster.
parameters:
- name: --acr
short-summary: The FQDN of the ACR.
examples:
  - name: Validate the ACR is accessible from the AKS cluster.
text: az aks check-acr --name MyManagedCluster --resource-group MyResourceGroup --acr myacr.azurecr.io
crafted: true
"""
helps['aks browse'] = """
type: command
short-summary: Show the dashboard for a Kubernetes cluster in a web browser.
parameters:
- name: --disable-browser
type: bool
short-summary: Don't launch a web browser after establishing port-forwarding.
long-summary: Add this argument when launching a web browser manually, or for automated testing.
- name: --listen-port
short-summary: The listening port for the dashboard.
long-summary: Add this argument when the default listening port is used by another process or unavailable.
- name: --listen-address
short-summary: The listening address for the dashboard.
long-summary: Add this argument to listen on a specific IP address.
examples:
- name: Show the dashboard for a Kubernetes cluster in a web browser. (autogenerated)
text: az aks browse --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks create'] = """
type: command
short-summary: Create a new managed Kubernetes cluster.
parameters:
- name: --generate-ssh-keys
type: string
short-summary: Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory.
- name: --service-principal
type: string
short-summary: Service principal used for authentication to Azure APIs.
long-summary: If not specified, a new service principal is created and cached at $HOME/.azure/aksServicePrincipal.json to be used by subsequent `az aks` commands.
- name: --skip-subnet-role-assignment
type: bool
short-summary: Skip role assignment for subnet (advanced networking).
long-summary: If specified, please make sure your service principal has the access to your subnet.
- name: --client-secret
type: string
short-summary: Secret associated with the service principal. This argument is required if `--service-principal` is specified.
- name: --node-vm-size -s
type: string
short-summary: Size of Virtual Machines to create as Kubernetes nodes.
- name: --dns-name-prefix -p
type: string
short-summary: Prefix for hostnames that are created. If not specified, generate a hostname using the managed cluster and resource group names.
- name: --node-count -c
type: int
short-summary: Number of nodes in the Kubernetes node pool. After creating a cluster, you can change the size of its node pool with `az aks scale`.
- name: --zones -z
type: string array
short-summary: Availability zones where agent nodes will be placed.
- name: --node-osdisk-size
type: int
short-summary: Size in GB of the OS disk for each node in the node pool. Minimum 30 GB.
- name: --node-osdisk-type
type: string
short-summary: "OS disk type to be used for machines in a given agent pool: Ephemeral or Managed. Defaults to 'Ephemeral' when possible in conjunction with VM size and OS disk size. May not be changed for this pool after creation."
- name: --kubernetes-version -k
type: string
short-summary: Version of Kubernetes to use for creating the cluster, such as "1.16.9".
populator-commands:
- "`az aks get-versions`"
- name: --ssh-key-value
type: string
short-summary: Public key path or key contents to install on node VMs for SSH access. For example, 'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'.
- name: --admin-username -u
type: string
short-summary: User account to create on node VMs for SSH access.
- name: --windows-admin-username
type: string
short-summary: Username to create on Windows node VMs.
- name: --windows-admin-password
type: string
short-summary: Password to create on Windows node VMs.
- name: --enable-ahub
type: bool
short-summary: Enable Azure Hybrid User Benefits (AHUB) for Windows VMs.
- name: --enable-aad
type: bool
short-summary: Enable managed AAD feature for cluster.
- name: --aad-admin-group-object-ids
type: string
    short-summary: Comma separated list of aad group object IDs that will be set as cluster admin.
- name: --aad-client-app-id
type: string
short-summary: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl.
- name: --aad-server-app-id
type: string
short-summary: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application).
- name: --aad-server-app-secret
type: string
short-summary: The secret of an Azure Active Directory server application.
- name: --aad-tenant-id
type: string
short-summary: The ID of an Azure Active Directory tenant.
- name: --dns-service-ip
type: string
short-summary: An IP address assigned to the Kubernetes DNS service.
long-summary: This address must be within the Kubernetes service address range specified by "--service-cidr". For example, 10.0.0.10.
- name: --docker-bridge-address
type: string
short-summary: A specific IP address and netmask for the Docker bridge, using standard CIDR notation.
long-summary: This address must not be in any Subnet IP ranges, or the Kubernetes service address range. For example, 172.17.0.1/16.
- name: --load-balancer-sku
type: string
short-summary: Azure Load Balancer SKU selection for your cluster. basic or standard.
long-summary: Select between Basic or Standard Azure Load Balancer SKU for your AKS cluster.
- name: --load-balancer-managed-outbound-ip-count
type: int
short-summary: Load balancer managed outbound IP count.
long-summary: Desired number of managed outbound IPs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only.
- name: --load-balancer-outbound-ips
type: string
short-summary: Load balancer outbound IP resource IDs.
long-summary: Comma-separated public IP resource IDs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only.
- name: --load-balancer-outbound-ip-prefixes
type: string
short-summary: Load balancer outbound IP prefix resource IDs.
long-summary: Comma-separated public IP prefix resource IDs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only.
- name: --load-balancer-outbound-ports
type: int
short-summary: Load balancer outbound allocated ports.
long-summary: Desired static number of outbound ports per VM in the load balancer backend pool. By default, set to 0 which uses the default allocation based on the number of VMs.
- name: --load-balancer-idle-timeout
type: int
short-summary: Load balancer idle timeout in minutes.
long-summary: Desired idle timeout for load balancer outbound flows, default is 30 minutes. Please specify a value in the range of [4, 100].
- name: --outbound-type
type: string
short-summary: How outbound traffic will be configured for a cluster.
long-summary: Select between loadBalancer and userDefinedRouting. If not set, defaults to type loadBalancer. Requires --vnet-subnet-id to be provided with a preconfigured route table and --load-balancer-sku to be Standard.
- name: --enable-cluster-autoscaler
type: bool
short-summary: Enable cluster autoscaler, default value is false.
long-summary: If specified, please make sure the kubernetes version is larger than 1.10.6.
- name: --min-count
type: int
short-summary: Minimum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100].
- name: --max-count
type: int
short-summary: Maximum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100].
- name: --vm-set-type
type: string
short-summary: Agent pool vm set type. VirtualMachineScaleSets or AvailabilitySet.
- name: --enable-addons -a
type: string
short-summary: Enable the Kubernetes addons in a comma-separated list.
long-summary: |-
These addons are available:
http_application_routing - configure ingress with automatic public DNS name creation.
monitoring - turn on Log Analytics monitoring. Uses the Log Analytics Default Workspace if it exists, else creates one.
Specify "--workspace-resource-id" to use an existing workspace.
If monitoring addon is enabled --no-wait argument will have no effect
azure-policy - enable Azure policy. The Azure Policy add-on for AKS enables at-scale enforcements and safeguards on your clusters in a centralized, consistent manner.
Learn more at aka.ms/aks/policy.
virtual-node - enable AKS Virtual Node.
Requires --aci-subnet-name to provide the name of an existing subnet for the Virtual Node to use.
aci-subnet-name must be in the same vnet which is specified by --vnet-subnet-id (required as well).
confcom - enable confcom addon, this will enable SGX device plugin by default.
- name: --disable-rbac
type: bool
short-summary: Disable Kubernetes Role-Based Access Control.
- name: --enable-rbac -r
type: bool
short-summary: "Enable Kubernetes Role-Based Access Control. Default: enabled."
- name: --max-pods -m
type: int
short-summary: The maximum number of pods deployable to a node.
long-summary: If not specified, defaults to 110, or 30 for advanced networking configurations.
- name: --network-plugin
type: string
short-summary: The Kubernetes network plugin to use.
long-summary: Specify "azure" for advanced networking configurations. Defaults to "kubenet".
- name: --network-policy
type: string
short-summary: The Kubernetes network policy to use.
long-summary: |
Using together with "azure" network plugin.
Specify "azure" for Azure network policy manager and "calico" for calico network policy controller.
Defaults to "" (network policy disabled).
- name: --no-ssh-key -x
type: string
short-summary: Do not use or create a local SSH key.
long-summary: To access nodes after creating a cluster with this option, use the Azure Portal.
- name: --pod-cidr
type: string
short-summary: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
long-summary: This range must not overlap with any Subnet IP ranges. For example, 172.244.0.0/16.
- name: --service-cidr
type: string
short-summary: A CIDR notation IP range from which to assign service cluster IPs.
long-summary: This range must not overlap with any Subnet IP ranges. For example, 10.0.0.0/16.
- name: --vnet-subnet-id
type: string
short-summary: The ID of a subnet in an existing VNet into which to deploy the cluster.
- name: --ppg
type: string
short-summary: The ID of a PPG.
- name: --enable-node-public-ip
type: bool
short-summary: Enable VMSS node public IP.
- name: --workspace-resource-id
type: string
short-summary: The resource ID of an existing Log Analytics Workspace to use for storing monitoring data. If not specified, uses the default Log Analytics Workspace if it exists, otherwise creates one.
- name: --uptime-sla
type: bool
short-summary: Enable a paid managed cluster service with a financially backed SLA.
- name: --attach-acr
type: string
short-summary: Grant the 'acrpull' role assignment to the ACR specified by name or resource ID.
- name: --enable-private-cluster
type: string
short-summary: Enable private cluster.
- name: --api-server-authorized-ip-ranges
type: string
    short-summary: Comma separated list of authorized apiserver IP ranges. Set to 0.0.0.0/32 to restrict apiserver traffic to node pools.
- name: --enable-managed-identity
type: bool
short-summary: Using a system assigned managed identity to manage cluster resource group.
- name: --assign-identity
type: string
short-summary: Specify an existing user assigned identity for control plane's usage in order to manage cluster resource group.
- name: --node-osdisk-diskencryptionset-id -d
type: string
short-summary: ResourceId of the disk encryption set to use for enabling encryption at rest on agent node os disk.
- name: --aci-subnet-name
type: string
short-summary: The name of a subnet in an existing VNet into which to deploy the virtual nodes.
- name: --appgw-name
type: string
short-summary: Name of the application gateway to create/use in the node resource group. Use with ingress-azure addon.
- name: --appgw-subnet-cidr
type: string
short-summary: Subnet CIDR to use for a new subnet created to deploy the Application Gateway. Use with ingress-azure addon.
- name: --appgw-id
type: string
short-summary: Resource Id of an existing Application Gateway to use with AGIC. Use with ingress-azure addon.
- name: --appgw-subnet-id
type: string
short-summary: Resource Id of an existing Subnet used to deploy the Application Gateway. Use with ingress-azure addon.
- name: --appgw-watch-namespace
type: string
short-summary: Specify the namespace, which AGIC should watch. This could be a single string value, or a comma-separated list of namespaces.
- name: --enable-sgxquotehelper
type: bool
short-summary: Enable SGX quote helper for confcom addon.
examples:
- name: Create a Kubernetes cluster with an existing SSH public key.
text: az aks create -g MyResourceGroup -n MyManagedCluster --ssh-key-value /path/to/publickey
- name: Create a Kubernetes cluster with a specific version.
text: az aks create -g MyResourceGroup -n MyManagedCluster --kubernetes-version 1.16.9
- name: Create a Kubernetes cluster with a larger node pool.
text: az aks create -g MyResourceGroup -n MyManagedCluster --node-count 7
- name: Create a kubernetes cluster with k8s 1.13.9 but use vmas.
text: az aks create -g MyResourceGroup -n MyManagedCluster --kubernetes-version 1.16.9 --vm-set-type AvailabilitySet
- name: Create a kubernetes cluster with default kubernetes version, default SKU load balancer (Standard) and default vm set type (VirtualMachineScaleSets).
text: az aks create -g MyResourceGroup -n MyManagedCluster
- name: Create a kubernetes cluster with standard SKU load balancer and two AKS created IPs for the load balancer outbound connection usage.
text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-managed-outbound-ip-count 2
- name: Create a kubernetes cluster with a standard SKU load balancer, with two outbound AKS managed IPs an idle flow timeout of 5 minutes and 8000 allocated ports per machine
text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-managed-outbound-ip-count 2 --load-balancer-idle-timeout 5 --load-balancer-outbound-ports 8000
- name: Create a kubernetes cluster with standard SKU load balancer and use the provided public IPs for the load balancer outbound connection usage.
text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-outbound-ips <ip-resource-id-1,ip-resource-id-2>
- name: Create a kubernetes cluster with standard SKU load balancer and use the provided public IP prefixes for the load balancer outbound connection usage.
text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-outbound-ip-prefixes <ip-prefix-resource-id-1,ip-prefix-resource-id-2>
- name: Create a kubernetes cluster with basic SKU load balancer and AvailabilitySet vm set type.
text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-sku basic --vm-set-type AvailabilitySet
- name: Create a kubernetes cluster with authorized apiserver IP ranges.
text: az aks create -g MyResourceGroup -n MyManagedCluster --api-server-authorized-ip-ranges 193.168.1.0/24,194.168.1.0/24,195.168.1.0
- name: Create a kubernetes cluster which enables managed identity.
text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-managed-identity
- name: Create a kubernetes cluster with userDefinedRouting, standard load balancer SKU and a custom subnet preconfigured with a route table
text: az aks create -g MyResourceGroup -n MyManagedCluster --outbound-type userDefinedRouting --load-balancer-sku standard --vnet-subnet-id customUserSubnetVnetID
- name: Create a kubernetes cluster with supporting Windows agent pools.
text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-sku Standard --network-plugin azure --windows-admin-username azure --windows-admin-password 'replacePassword1234$'
- name: Create a kubernetes cluster with supporting Windows agent pools with AHUB enabled.
text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-sku Standard --network-plugin azure --windows-admin-username azure --windows-admin-password 'replacePassword1234$' --enable-ahub
- name: Create a kubernetes cluster with managed AAD enabled.
text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-aad --aad-admin-group-object-ids <id-1,id-2> --aad-tenant-id <id>
- name: Create a kubernetes cluster with server side encryption using your owned key.
text: az aks create -g MyResourceGroup -n MyManagedCluster --node-osdisk-diskencryptionset-id <disk-encryption-set-resource-id>
- name: Create a kubernetes cluster with ephemeral OS enabled.
text: az aks create -g MyResourceGroup -n MyManagedCluster --node-osdisk-type Ephemeral --node-osdisk-size 48
"""
# Help entry for `az aks update`: YAML document stored in the shared `helps` dict.
# Fixes user-facing typos: "seperated" -> "separated", "featture" -> "feature",
# and "a AKS" -> "an AKS" in the example names.
helps['aks update'] = """
type: command
short-summary: Update a managed Kubernetes cluster.
parameters:
- name: --enable-cluster-autoscaler -e
type: bool
short-summary: Enable cluster autoscaler.
- name: --disable-cluster-autoscaler -d
type: bool
short-summary: Disable cluster autoscaler.
- name: --update-cluster-autoscaler -u
type: bool
short-summary: Update min-count or max-count for cluster autoscaler.
- name: --min-count
type: int
short-summary: Minimum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --max-count
type: int
short-summary: Maximum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --uptime-sla
type: bool
short-summary: Enable a paid managed cluster service with a financially backed SLA.
- name: --load-balancer-managed-outbound-ip-count
type: int
short-summary: Load balancer managed outbound IP count.
long-summary: Desired number of managed outbound IPs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only. If updated, it will wipe off the existing setting on Load balancer managed outbound IP count; Load balancer outbound IP resource IDs and Load balancer outbound IP prefix resource IDs.
- name: --load-balancer-outbound-ips
type: string
short-summary: Load balancer outbound IP resource IDs.
long-summary: Comma-separated public IP resource IDs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only. If updated, it will wipe off the existing setting on Load balancer managed outbound IP count; Load balancer outbound IP resource IDs and Load balancer outbound IP prefix resource IDs.
- name: --load-balancer-outbound-ip-prefixes
type: string
short-summary: Load balancer outbound IP prefix resource IDs.
long-summary: Comma-separated public IP prefix resource IDs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only. If updated, it will wipe off the existing setting on Load balancer managed outbound IP count; Load balancer outbound IP resource IDs and Load balancer outbound IP prefix resource IDs.
- name: --load-balancer-outbound-ports
type: int
short-summary: Load balancer outbound allocated ports.
long-summary: Desired static number of outbound ports per VM in the load balancer backend pool. By default, set to 0 which uses the default allocation based on the number of VMs.
- name: --load-balancer-idle-timeout
type: int
short-summary: Load balancer idle timeout in minutes.
long-summary: Desired idle timeout for load balancer outbound flows, default is 30 minutes. Please specify a value in the range of [4, 100].
- name: --attach-acr
type: string
short-summary: Grant the 'acrpull' role assignment to the ACR specified by name or resource ID.
- name: --detach-acr
type: string
short-summary: Disable the 'acrpull' role assignment to the ACR specified by name or resource ID.
- name: --api-server-authorized-ip-ranges
type: string
short-summary: Comma separated list of authorized apiserver IP ranges. Set to "" to allow all traffic on a previously restricted cluster. Set to 0.0.0.0/32 to restrict apiserver traffic to node pools.
- name: --enable-aad
type: bool
short-summary: Enable managed AAD feature for cluster.
- name: --aad-admin-group-object-ids
type: string
short-summary: Comma separated list of aad group object IDs that will be set as cluster admin.
- name: --aad-tenant-id
type: string
short-summary: The ID of an Azure Active Directory tenant.
- name: --enable-ahub
type: bool
short-summary: Enable Azure Hybrid User Benefits (AHUB) feature for cluster.
- name: --disable-ahub
type: bool
short-summary: Disable Azure Hybrid User Benefits (AHUB) feature for cluster.
examples:
- name: Update a kubernetes cluster with standard SKU load balancer to use two AKS created IPs for the load balancer outbound connection usage.
text: az aks update -g MyResourceGroup -n MyManagedCluster --load-balancer-managed-outbound-ip-count 2
- name: Update a kubernetes cluster with standard SKU load balancer to use the provided public IPs for the load balancer outbound connection usage.
text: az aks update -g MyResourceGroup -n MyManagedCluster --load-balancer-outbound-ips <ip-resource-id-1,ip-resource-id-2>
- name: Create a kubernetes cluster with a standard SKU load balancer, with two outbound AKS managed IPs an idle flow timeout of 5 minutes and 8000 allocated ports per machine
text: az aks update -g MyResourceGroup -n MyManagedCluster --load-balancer-managed-outbound-ip-count 2 --load-balancer-idle-timeout 5 --load-balancer-outbound-ports 8000
- name: Update a kubernetes cluster with standard SKU load balancer to use the provided public IP prefixes for the load balancer outbound connection usage.
text: az aks update -g MyResourceGroup -n MyManagedCluster --load-balancer-outbound-ip-prefixes <ip-prefix-resource-id-1,ip-prefix-resource-id-2>
- name: Attach AKS cluster to ACR by name "acrName"
text: az aks update -g MyResourceGroup -n MyManagedCluster --attach-acr acrName
- name: Update a kubernetes cluster with authorized apiserver ip ranges.
text: az aks update -g MyResourceGroup -n MyManagedCluster --api-server-authorized-ip-ranges 193.168.1.0/24,194.168.1.0/24
- name: Disable authorized apiserver ip ranges feature for a kubernetes cluster.
text: az aks update -g MyResourceGroup -n MyManagedCluster --api-server-authorized-ip-ranges ""
- name: Restrict apiserver traffic in a kubernetes cluster to agentpool nodes.
text: az aks update -g MyResourceGroup -n MyManagedCluster --api-server-authorized-ip-ranges 0.0.0.0/32
- name: Update an AKS-managed AAD cluster with tenant ID or admin group object IDs.
text: az aks update -g MyResourceGroup -n MyManagedCluster --aad-admin-group-object-ids <id-1,id-2> --aad-tenant-id <id>
- name: Migrate an AKS AAD-Integrated cluster or a non-AAD cluster to an AKS-managed AAD cluster.
text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-aad --aad-admin-group-object-ids <id-1,id-2> --aad-tenant-id <id>
- name: Enable Azure Hybrid User Benefits feature for a kubernetes cluster.
text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-ahub
- name: Disable Azure Hybrid User Benefits feature for a kubernetes cluster.
text: az aks update -g MyResourceGroup -n MyManagedCluster --disable-ahub
"""
# Help entry for `az aks delete`: YAML document stored in the shared `helps` dict
# (imported at the top of this file).
helps['aks delete'] = """
type: command
short-summary: Delete a managed Kubernetes cluster.
examples:
- name: Delete a managed Kubernetes cluster. (autogenerated)
text: az aks delete --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
# Help entry for `az aks disable-addons`.
helps['aks disable-addons'] = """
type: command
short-summary: Disable Kubernetes addons.
parameters:
- name: --addons -a
type: string
short-summary: Disable the Kubernetes addons in a comma-separated list.
examples:
- name: Disable Kubernetes addons. (autogenerated)
text: az aks disable-addons --addons virtual-node --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
# Help entry for `az aks enable-addons`.
# Consistency fix: the Application Gateway Ingress Controller addon is named
# `ingress-appgw` in the addon list and example below, so the --appgw-* parameter
# descriptions now reference "ingress-appgw addon" instead of "ingress-azure addon".
helps['aks enable-addons'] = """
type: command
short-summary: Enable Kubernetes addons.
long-summary: |-
These addons are available:
http_application_routing - configure ingress with automatic public DNS name creation.
monitoring - turn on Log Analytics monitoring. Requires "--workspace-resource-id".
If monitoring addon is enabled --no-wait argument will have no effect
virtual-node - enable AKS Virtual Node. Requires --subnet-name to provide the name of an existing subnet for the Virtual Node to use.
azure-policy - enable Azure policy. The Azure Policy add-on for AKS enables at-scale enforcements and safeguards on your clusters in a centralized, consistent manner.
Learn more at aka.ms/aks/policy.
ingress-appgw - enable Application Gateway Ingress Controller addon.
parameters:
- name: --addons -a
type: string
short-summary: Enable the Kubernetes addons in a comma-separated list.
- name: --workspace-resource-id
type: string
short-summary: The resource ID of an existing Log Analytics Workspace to use for storing monitoring data.
- name: --appgw-name
type: string
short-summary: Name of the application gateway to create/use in the node resource group. Use with ingress-appgw addon.
- name: --appgw-subnet-cidr
type: string
short-summary: Subnet CIDR to use for a new subnet created to deploy the Application Gateway. Use with ingress-appgw addon.
- name: --appgw-id
type: string
short-summary: Resource Id of an existing Application Gateway to use with AGIC. Use with ingress-appgw addon.
- name: --appgw-subnet-id
type: string
short-summary: Resource Id of an existing Subnet used to deploy the Application Gateway. Use with ingress-appgw addon.
- name: --appgw-watch-namespace
type: string
short-summary: Specify the namespace, which AGIC should watch. This could be a single string value, or a comma-separated list of namespaces.
- name: --enable-sgxquotehelper
type: bool
short-summary: Enable SGX quote helper for confcom addon.
examples:
- name: Enable Kubernetes addons. (autogenerated)
text: az aks enable-addons --addons virtual-node --name MyManagedCluster --resource-group MyResourceGroup --subnet MySubnetName
crafted: true
- name: Enable ingress-appgw addon with subnet prefix.
text: az aks enable-addons --name MyManagedCluster --resource-group MyResourceGroup --addons ingress-appgw --appgw-subnet-cidr 10.2.0.0/16 --appgw-name gateway
crafted: true
"""
# Help entry for `az aks get-credentials`.
helps['aks get-credentials'] = """
type: command
short-summary: Get access credentials for a managed Kubernetes cluster.
long-summary: By default, the credentials are merged into the .kube/config file so kubectl can use them. See -f parameter for details.
parameters:
- name: --admin -a
type: bool
short-summary: "Get cluster administrator credentials. Default: cluster user credentials."
long-summary: "On clusters with Azure Active Directory integration, this bypasses normal Azure AD authentication and can be used if you're permanently blocked by not having access to a valid Azure AD group with access to your cluster. Requires 'Azure Kubernetes Service Cluster Admin' role."
- name: --file -f
type: string
short-summary: Kubernetes configuration file to update. Use "-" to print YAML to stdout instead.
- name: --overwrite-existing
type: bool
short-summary: Overwrite any existing cluster entry with the same name.
- name: --output -o
type: string
long-summary: Credentials are always in YAML format, so this argument is effectively ignored.
examples:
- name: Get access credentials for a managed Kubernetes cluster. (autogenerated)
text: az aks get-credentials --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
# Help entries for `az aks get-upgrades`, `get-versions`, `install-cli` and `list`.
helps['aks get-upgrades'] = """
type: command
short-summary: Get the upgrade versions available for a managed Kubernetes cluster.
examples:
- name: Get the upgrade versions available for a managed Kubernetes cluster
text: az aks get-upgrades --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks get-versions'] = """
type: command
short-summary: Get the versions available for creating a managed Kubernetes cluster.
examples:
- name: Get the versions available for creating a managed Kubernetes cluster
text: az aks get-versions --location westus2
crafted: true
"""
helps['aks install-cli'] = """
type: command
short-summary: Download and install kubectl, the Kubernetes command-line tool. Download and install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
helps['aks list'] = """
type: command
short-summary: List managed Kubernetes clusters.
"""
# Help entry for the `az aks nodepool` command group.
# Fixes the duplicated word in the summary ("Kubernetes kubernetes cluster").
helps['aks nodepool'] = """
type: group
short-summary: Commands to manage node pools in a managed Kubernetes cluster.
"""
# Help entry for `az aks nodepool add`.
helps['aks nodepool add'] = """
type: command
short-summary: Add a node pool to the managed Kubernetes cluster.
parameters:
- name: --node-vm-size -s
type: string
short-summary: Size of Virtual Machines to create as Kubernetes nodes.
- name: --node-count -c
type: int
short-summary: Number of nodes in the Kubernetes agent pool. After creating a cluster, you can change the size of its node pool with `az aks scale`.
- name: --kubernetes-version -k
type: string
short-summary: Version of Kubernetes to use for creating the cluster, such as "1.16.9".
populator-commands:
- "`az aks get-versions`"
- name: --node-osdisk-size
type: int
short-summary: Size in GB of the OS disk for each node in the agent pool. Minimum 30 GB.
- name: --node-osdisk-type
type: string
short-summary: OS disk type to be used for machines in a given agent pool. Defaults to 'Ephemeral' when possible in conjunction with VM size and OS disk size. May not be changed for this pool after creation.
- name: --max-pods -m
type: int
short-summary: The maximum number of pods deployable to a node.
long-summary: If not specified, defaults to 110, or 30 for advanced networking configurations.
- name: --zones -z
type: string array
short-summary: Availability zones where agent nodes will be placed.
- name: --enable-node-public-ip
type: bool
short-summary: Enable VMSS node public IP.
- name: --vnet-subnet-id
type: string
short-summary: The ID of a subnet in an existing VNet into which to deploy the cluster.
- name: --ppg
type: string
short-summary: The ID of a PPG.
- name: --os-type
type: string
short-summary: The OS Type. Linux or Windows.
- name: --enable-cluster-autoscaler -e
type: bool
short-summary: Enable cluster autoscaler.
- name: --min-count
type: int
short-summary: Minimum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --max-count
type: int
short-summary: Maximum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --node-taints
type: string
short-summary: The node taints for the node pool. You can't change the node taints through CLI after the node pool is created.
- name: --labels
type: string
short-summary: The node labels for the node pool. You can't change the node labels through CLI after the node pool is created. See https://aka.ms/node-labels for syntax of labels.
- name: --mode
type: string
short-summary: The mode for a node pool which defines a node pool's primary function. If set as "System", AKS prefers system pods scheduling to node pools with mode `System`. Learn more at https://aka.ms/aks/nodepool/mode.
- name: --priority
type: string
short-summary: The priority of the node pool.
- name: --eviction-policy
type: string
short-summary: The eviction policy of the Spot node pool. It can only be set when --priority is Spot.
- name: --spot-max-price
type: float
short-summary: It can only be set when --priority is Spot. Specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand. It can only include up to 5 decimal places.
- name: --max-surge
type: string
short-summary: Extra nodes used to speed upgrade. When specified, it represents the number or percent used, eg. 5 or 33%
examples:
- name: Create a nodepool in an existing AKS cluster with ephemeral os enabled.
text: az aks nodepool add -g MyResourceGroup -n nodepool1 --cluster-name MyManagedCluster --node-osdisk-type Ephemeral --node-osdisk-size 48
"""
# Help entries for the remaining `az aks nodepool` subcommands:
# delete, get-upgrades, list, scale, show, update and upgrade.
helps['aks nodepool delete'] = """
type: command
short-summary: Delete the agent pool in the managed Kubernetes cluster.
"""
helps['aks nodepool get-upgrades'] = """
type: command
short-summary: Get the available upgrade versions for an agent pool of the managed Kubernetes cluster.
examples:
- name: Get the available upgrade versions for an agent pool of the managed Kubernetes cluster.
text: az aks nodepool get-upgrades --resource-group MyResourceGroup --cluster-name MyManagedCluster --nodepool-name MyNodePool
crafted: true
parameters:
- name: --nodepool-name
type: string
short-summary: name of the node pool.
"""
helps['aks nodepool list'] = """
type: command
short-summary: List node pools in the managed Kubernetes cluster.
"""
helps['aks nodepool scale'] = """
type: command
short-summary: Scale the node pool in a managed Kubernetes cluster.
parameters:
- name: --node-count -c
type: int
short-summary: Number of nodes in the Kubernetes node pool.
"""
helps['aks nodepool show'] = """
type: command
short-summary: Show the details for a node pool in the managed Kubernetes cluster.
"""
helps['aks nodepool update'] = """
type: command
short-summary: Update a node pool to enable/disable cluster-autoscaler or change min-count or max-count
parameters:
- name: --enable-cluster-autoscaler -e
type: bool
short-summary: Enable cluster autoscaler.
- name: --disable-cluster-autoscaler -d
type: bool
short-summary: Disable cluster autoscaler.
- name: --update-cluster-autoscaler -u
type: bool
short-summary: Update min-count or max-count for cluster autoscaler.
- name: --min-count
type: int
short-summary: Minimum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --max-count
type: int
short-summary: Maximum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --mode
type: string
short-summary: The mode for a node pool which defines a node pool's primary function. If set as "System", AKS prefers system pods scheduling to node pools with mode `System`. Learn more at https://aka.ms/aks/nodepool/mode.
- name: --max-surge
type: string
short-summary: Extra nodes used to speed upgrade. When specified, it represents the number or percent used, eg. 5 or 33%
examples:
- name: Enable cluster-autoscaler within node count range [1,5]
text: az aks nodepool update --enable-cluster-autoscaler --min-count 1 --max-count 5 -g MyResourceGroup -n nodepool1 --cluster-name MyManagedCluster
- name: Disable cluster-autoscaler for an existing cluster
text: az aks nodepool update --disable-cluster-autoscaler -g MyResourceGroup -n nodepool1 --cluster-name MyManagedCluster
- name: Update min-count or max-count for cluster autoscaler.
text: az aks nodepool update --update-cluster-autoscaler --min-count 1 --max-count 10 -g MyResourceGroup -n nodepool1 --cluster-name MyManagedCluster
"""
helps['aks nodepool upgrade'] = """
type: command
short-summary: Upgrade the node pool in a managed Kubernetes cluster.
parameters:
- name: --kubernetes-version -k
type: string
short-summary: Version of Kubernetes to upgrade the node pool to, such as "1.16.9".
- name: --node-image-only
type: bool
short-summary: Only upgrade agent pool's node image.
- name: --max-surge
type: string
short-summary: Extra nodes used to speed upgrade. When specified, it represents the number or percent used, eg. 5 or 33%
"""
# Help entries for `az aks remove-dev-spaces`, `scale`, `show` and `update-credentials`.
helps['aks remove-dev-spaces'] = """
type: command
short-summary: Remove Azure Dev Spaces from a managed Kubernetes cluster.
examples:
- name: Remove Azure Dev Spaces from a managed Kubernetes cluster.
text: |-
az aks remove-dev-spaces -g my-aks-group -n my-aks
- name: Remove Azure Dev Spaces from a managed Kubernetes cluster without prompting.
text: |-
az aks remove-dev-spaces -g my-aks-group -n my-aks --yes
"""
helps['aks scale'] = """
type: command
short-summary: Scale the node pool in a managed Kubernetes cluster.
parameters:
- name: --node-count -c
type: int
short-summary: Number of nodes in the Kubernetes node pool.
examples:
- name: Scale the node pool in a managed Kubernetes cluster. (autogenerated)
text: az aks scale --name MyManagedCluster --node-count 3 --resource-group MyResourceGroup
crafted: true
"""
helps['aks show'] = """
type: command
short-summary: Show the details for a managed Kubernetes cluster.
examples:
- name: Show the details for a managed Kubernetes cluster
text: az aks show --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks update-credentials'] = """
type: command
short-summary: Update credentials for a managed Kubernetes cluster, like service principal.
parameters:
- name: --reset-service-principal
type: bool
short-summary: Reset service principal for a managed cluster.
- name: --service-principal
type: string
short-summary: Service principal used for authentication to Azure APIs. This argument is required if `--reset-service-principal` is specified.
- name: --client-secret
type: string
short-summary: Secret associated with the service principal. This argument is required if `--service-principal` is specified.
- name: --reset-aad
type: string
short-summary: Reset Azure Active Directory configuration for a managed cluster.
- name: --aad-server-app-id
type: string
short-summary: The ID of an Azure Active Directory server application. This argument is required if `--reset-aad` is specified.
- name: --aad-server-app-secret
type: string
short-summary: The secret of an Azure Active Directory server application. This argument is required if `--reset-aad` is specified.
- name: --aad-client-app-id
type: string
short-summary: The ID of an Azure Active Directory client application. This argument is required if `--reset-aad` is specified.
- name: --aad-tenant-id
type: string
short-summary: Tenant ID associated with Azure Active Directory.
examples:
- name: Update an existing Kubernetes cluster with new service principal.
text: az aks update-credentials -g MyResourceGroup -n MyManagedCluster --reset-service-principal --service-principal MyNewServicePrincipalID --client-secret MyNewServicePrincipalSecret
- name: Update an existing Azure Active Directory Kubernetes cluster with new server app secret key.
text: az aks update-credentials -g MyResourceGroup -n MyManagedCluster --reset-aad --aad-server-app-id MyExistingAADServerAppID --aad-server-app-secret MyNewAADServerAppSecret --aad-client-app-id MyExistingAADClientAppID --aad-tenant-id MyAADTenantID
"""
# Help entry for `az aks upgrade`.
helps['aks upgrade'] = """
type: command
short-summary: Upgrade a managed Kubernetes cluster to a newer version.
long-summary: "Kubernetes will be unavailable during cluster upgrades."
parameters:
- name: --kubernetes-version -k
type: string
short-summary: Version of Kubernetes to upgrade the cluster to, such as "1.16.9".
populator-commands:
- "`az aks get-upgrades`"
- name: --control-plane-only
type: bool
short-summary: Upgrade the cluster control plane only. If not specified, both control plane AND all node pools will be upgraded.
- name: --node-image-only
type: bool
short-summary: Only upgrade node image for agent pools.
examples:
- name: Upgrade a managed Kubernetes cluster to a newer version. (autogenerated)
text: az aks upgrade --kubernetes-version 1.12.6 --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
# Help entry for `az aks use-dev-spaces`.
# Fixes the grammar typo "for a Azure Dev Spaces controller" -> "for an Azure ...".
helps['aks use-dev-spaces'] = """
type: command
short-summary: Use Azure Dev Spaces with a managed Kubernetes cluster.
parameters:
- name: --update
type: bool
short-summary: Update to the latest Azure Dev Spaces client components.
- name: --space -s
type: string
short-summary: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
- name: --endpoint -e
type: string
short-summary: The endpoint type to be used for an Azure Dev Spaces controller. See https://aka.ms/azds-networking for more information.
examples:
- name: Use Azure Dev Spaces with a managed Kubernetes cluster, interactively selecting a dev space.
text: |-
az aks use-dev-spaces -g my-aks-group -n my-aks
- name: Use Azure Dev Spaces with a managed Kubernetes cluster, updating to the latest Azure Dev Spaces \\
client components and selecting a new or existing dev space 'my-space'.
text: |-
az aks use-dev-spaces -g my-aks-group -n my-aks --update --space my-space
- name: Use Azure Dev Spaces with a managed Kubernetes cluster, selecting a new or existing dev space \\
'develop/my-space' without prompting for confirmation.
text: |-
az aks use-dev-spaces -g my-aks-group -n my-aks -s develop/my-space -y
- name: Use Azure Dev Spaces with a managed Kubernetes cluster with a private endpoint.
text: |-
az aks use-dev-spaces -g my-aks-group -n my-aks -e private
"""
# Help entries for `az aks wait` and `az aks rotate-certs`.
helps['aks wait'] = """
type: command
short-summary: Wait for a managed Kubernetes cluster to reach a desired state.
long-summary: If an operation on a cluster was interrupted or was started with `--no-wait`, use this command to wait for it to complete.
examples:
- name: Wait for a cluster to be upgraded, polling every minute for up to thirty minutes.
text: |-
az aks wait -g MyResourceGroup -n MyManagedCluster --updated --interval 60 --timeout 1800
- name: Wait for a managed Kubernetes cluster to reach a desired state (autogenerated)
text: az aks wait --created --interval 60 --name MyManagedCluster --resource-group MyResourceGroup --timeout 1800
crafted: true
"""
helps['aks rotate-certs'] = """
type: command
short-summary: Rotate certificates and keys on a managed Kubernetes cluster
long-summary: Kubernetes will be unavailable during cluster certificate rotation.
"""
# Help entry for the deprecated `az openshift` command group (ARO 3.11).
helps['openshift'] = """
type: group
short-summary: Manage Azure Red Hat OpenShift 3.11 clusters.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
"""
# Help entry for `az openshift create`.
# Consistency fix: two example names used "Openshift"; the product is spelled
# "OpenShift" everywhere else in this block, so the examples now match.
helps['openshift create'] = """
type: command
short-summary: Create a new Azure Red Hat OpenShift 3.11 cluster.
long-summary: Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.
parameters:
- name: --compute-vm-size -s
type: string
short-summary: Size of Virtual Machines to create as OpenShift nodes.
- name: --compute-count -c
type: int
short-summary: Number of nodes in the OpenShift node pool.
- name: --aad-client-app-id
type: string
short-summary: The ID of an Azure Active Directory client application. If not specified, a new Azure Active Directory client is created.
- name: --aad-client-app-secret
type: string
short-summary: The secret of an Azure Active Directory client application.
- name: --aad-tenant-id
type: string
short-summary: The ID of an Azure Active Directory tenant.
- name: --vnet-peer
type: string
short-summary: The ID or the name of a subnet in an existing VNet into which to peer the cluster.
- name: --vnet-prefix
type: string
short-summary: The CIDR used on the VNet into which to deploy the cluster.
- name: --subnet-prefix
type: string
short-summary: The CIDR used on the Subnet into which to deploy the cluster.
- name: --customer-admin-group-id
type: string
short-summary: The Object ID of an Azure Active Directory Group that memberships will get synced into the OpenShift group "osa-customer-admins". If not specified, no cluster admin access will be granted.
- name: --workspace-id
type: string
short-summary: The resource id of an existing Log Analytics Workspace to use for storing monitoring data.
examples:
- name: Create an OpenShift cluster and auto create an AAD Client
text: az openshift create -g MyResourceGroup -n MyManagedCluster
- name: Create an OpenShift cluster and auto create an AAD Client and setup cluster admin group
text: az openshift create -g MyResourceGroup -n MyManagedCluster --customer-admin-group-id {GROUP_ID}
- name: Create an OpenShift cluster with 5 compute nodes and a custom AAD Client.
text: az openshift create -g MyResourceGroup -n MyManagedCluster --aad-client-app-id {APP_ID} --aad-client-app-secret {APP_SECRET} --aad-tenant-id {TENANT_ID} --compute-count 5
- name: Create an OpenShift cluster using a custom vnet
text: az openshift create -g MyResourceGroup -n MyManagedCluster --vnet-peer "/subscriptions/0000000-0000-0000-0000-000000000000/resourceGroups/openshift-vnet/providers/Microsoft.Network/virtualNetworks/test"
- name: Create an OpenShift cluster with Log Analytics monitoring enabled
text: az openshift create -g MyResourceGroup -n MyManagedCluster --workspace-id "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyResourceGroup/providers/Microsoft.OperationalInsights/workspaces/{workspace-id}"
"""
# Help entries for the remaining deprecated `az openshift` subcommands:
# delete, list, scale, show, wait, and the `monitor` group (enable/disable).
helps['openshift delete'] = """
type: command
short-summary: Delete an Azure Red Hat OpenShift 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Delete an Azure Red Hat OpenShift 3.11 cluster.
text: az openshift delete --name MyManagedOpenShiftCluster --resource-group MyResourceGroup
crafted: true
"""
helps['openshift list'] = """
type: command
short-summary: List Azure Red Hat OpenShift 3.11 clusters.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
"""
helps['openshift scale'] = """
type: command
short-summary: Scale the compute pool in an Azure Red Hat OpenShift 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
parameters:
- name: --compute-count -c
type: int
short-summary: Number of nodes in the OpenShift compute pool.
examples:
- name: Scale the compute pool in an Azure Red Hat OpenShift 3.11 cluster.
text: az openshift scale --compute-count 5 --name MyManagedOpenShiftCluster --resource-group MyResourceGroup
crafted: true
"""
helps['openshift show'] = """
type: command
short-summary: Show the details for an Azure Red Hat OpenShift 3.11 cluster.
long-summary: Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Show the details for an Azure Red Hat OpenShift 3.11 cluster.
text: az openshift show --name MyManagedOpenShiftCluster --resource-group MyResourceGroup
crafted: true
"""
helps['openshift wait'] = """
type: command
short-summary: Wait for an Azure Red Hat OpenShift 3.11 cluster to reach a desired state.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Wait for a cluster to be upgraded, polling every minute for up to thirty minutes.
text: |-
az openshift wait -g MyResourceGroup -n MyManagedCluster --updated --interval 60 --timeout 1800
"""
helps['openshift monitor'] = """
type: group
short-summary: Commands to manage Log Analytics monitoring in an ARO 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
"""
helps['openshift monitor enable'] = """
type: command
short-summary: Enable Log Analytics monitoring in an ARO 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Enable Log Analytics monitoring.
text: |-
az openshift monitor enable -g MyResourceGroup -n MyManagedCluster --workspace-id "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyResourceGroup/providers/Microsoft.OperationalInsights/workspaces/{workspace-id}"
"""
helps['openshift monitor disable'] = """
type: command
short-summary: Disable Log Analytics monitoring in an ARO 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Disable Log Analytics monitoring.
text: |-
az openshift monitor disable -g MyResourceGroup -n MyManagedCluster
"""
| 51.577132 | 337 | 0.732257 |
from knack.help_files import helps
helps['acs'] = """
type: group
short-summary: Manage Azure Container Services.
long-summary: |
ACS will be retired as a standalone service on January 31, 2020.
If you use the Kubernetes orchestrator, please migrate to AKS by January 31, 2020.
"""
helps['acs browse'] = """
type: command
short-summary: Show the dashboard for a service container's orchestrator in a web browser.
examples:
- name: Show the dashboard for a service container's orchestrator in a web browser. (autogenerated)
text: az acs browse --name MyContainerService --resource-group MyResourceGroup
crafted: true
"""
helps['acs create'] = """
type: command
short-summary: Create a new container service.
parameters:
- name: --service-principal
type: string
short-summary: Service principal used for authentication to Azure APIs.
long-summary: If not specified, a new service principal with the contributor role is created and cached at $HOME/.azure/acsServicePrincipal.json to be used by subsequent `az acs` commands.
- name: --client-secret
type: string
short-summary: Secret associated with the service principal. This argument is required if `--service-principal` is specified.
- name: --agent-count
short-summary: Set the default number of agents for the agent pools.
long-summary: Note that DC/OS clusters will have 1 or 2 additional public agents.
examples:
- name: Create a DCOS cluster with an existing SSH key.
text: |-
az acs create --orchestrator-type DCOS -g MyResourceGroup -n MyContainerService \\
--ssh-key-value /path/to/publickey
- name: Create a DCOS cluster with two agent pools.
text: |-
az acs create -g MyResourceGroup -n MyContainerService --agent-profiles '[ \\
{ \\
"name": "agentpool1" \\
}, \\
{ \\
"name": "agentpool2" \\
}]'
- name: Create a DCOS cluster where the second agent pool has a vmSize specified.
text: |-
az acs create -g MyResourceGroup -n MyContainerService --agent-profiles '[ \\
{ \\
"name": "agentpool1" \\
}, \\
{ \\
"name": "agentpool2", \\
"vmSize": "Standard_D2" \\
}]'
- name: Create a DCOS cluster with agent-profiles specified from a file.
text: az acs create -g MyResourceGroup -n MyContainerService --agent-profiles MyAgentProfiles.json
"""
helps['acs dcos'] = """
type: group
short-summary: Commands to manage a DC/OS-orchestrated Azure Container Service.
"""
helps['acs dcos install-cli'] = """
type: command
short-summary: Download and install the DC/OS command-line tool for a cluster.
"""
helps['acs delete'] = """
type: command
short-summary: Delete a container service.
examples:
- name: Delete a container service. (autogenerated)
text: az acs delete --name MyContainerService --resource-group MyResourceGroup
crafted: true
"""
helps['acs kubernetes'] = """
type: group
short-summary: Commands to manage a Kubernetes-orchestrated Azure Container Service.
"""
helps['acs kubernetes get-credentials'] = """
type: command
short-summary: Download and install credentials to access a cluster. This command requires the same private-key used to create the cluster.
parameters:
- name: --output -o
type: string
long-summary: Credentials are always in YAML format, so this argument is effectively ignored.
examples:
- name: Download and install credentials to access a cluster. This command requires the same private-key used to create the cluster. (autogenerated)
text: az acs kubernetes get-credentials --name MyContainerService --resource-group MyResourceGroup
crafted: true
"""
helps['acs kubernetes install-cli'] = """
type: command
short-summary: Download and install the Kubernetes command-line tool for a cluster.
"""
helps['acs list'] = """
type: command
short-summary: List container services.
"""
helps['acs list-locations'] = """
type: command
short-summary: List locations where Azure Container Service is in preview and in production.
"""
helps['acs scale'] = """
type: command
short-summary: Change the private agent count of a container service.
parameters:
- name: --new-agent-count
type: int
short-summary: The number of agents for the container service.
examples:
- name: Change the private agent count of a container service. (autogenerated)
text: az acs scale --name MyContainerService --new-agent-count 10 --resource-group MyResourceGroup
crafted: true
"""
helps['acs show'] = """
type: command
short-summary: Show the details for a container service.
examples:
- name: Show the details for a container service. (autogenerated)
text: az acs show --name MyContainerService --resource-group MyResourceGroup
crafted: true
"""
helps['acs wait'] = """
type: command
short-summary: Wait for a container service to reach a desired state.
long-summary: If an operation on a container service was interrupted or was started with `--no-wait`, use this command to wait for it to complete.
"""
helps['aks'] = """
type: group
short-summary: Manage Azure Kubernetes Services.
"""
# Help entry for `az aks check-acr`.
# Fix: user-facing typo "accesible" -> "accessible" (both occurrences).
helps["aks check-acr"] = """
type: command
short-summary: Validate an ACR is accessible from an AKS cluster.
parameters:
  - name: --acr
    short-summary: The FQDN of the ACR.
examples:
  - name: Validate the ACR is accessible from the AKS cluster.
    text: az aks check-acr --name MyManagedCluster --resource-group MyResourceGroup --acr myacr.azurecr.io
    crafted: true
"""
helps['aks browse'] = """
type: command
short-summary: Show the dashboard for a Kubernetes cluster in a web browser.
parameters:
- name: --disable-browser
type: bool
short-summary: Don't launch a web browser after establishing port-forwarding.
long-summary: Add this argument when launching a web browser manually, or for automated testing.
- name: --listen-port
short-summary: The listening port for the dashboard.
long-summary: Add this argument when the default listening port is used by another process or unavailable.
- name: --listen-address
short-summary: The listening address for the dashboard.
long-summary: Add this argument to listen on a specific IP address.
examples:
- name: Show the dashboard for a Kubernetes cluster in a web browser. (autogenerated)
text: az aks browse --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
# Help entry for `az aks create`.
# Fix: user-facing typo "Comma seperated" -> "Comma separated" in the
# --aad-admin-group-object-ids and --api-server-authorized-ip-ranges summaries.
helps['aks create'] = """
type: command
short-summary: Create a new managed Kubernetes cluster.
parameters:
  - name: --generate-ssh-keys
    type: string
    short-summary: Generate SSH public and private key files if missing. The keys will be stored in the ~/.ssh directory.
  - name: --service-principal
    type: string
    short-summary: Service principal used for authentication to Azure APIs.
    long-summary: If not specified, a new service principal is created and cached at $HOME/.azure/aksServicePrincipal.json to be used by subsequent `az aks` commands.
  - name: --skip-subnet-role-assignment
    type: bool
    short-summary: Skip role assignment for subnet (advanced networking).
    long-summary: If specified, please make sure your service principal has the access to your subnet.
  - name: --client-secret
    type: string
    short-summary: Secret associated with the service principal. This argument is required if `--service-principal` is specified.
  - name: --node-vm-size -s
    type: string
    short-summary: Size of Virtual Machines to create as Kubernetes nodes.
  - name: --dns-name-prefix -p
    type: string
    short-summary: Prefix for hostnames that are created. If not specified, generate a hostname using the managed cluster and resource group names.
  - name: --node-count -c
    type: int
    short-summary: Number of nodes in the Kubernetes node pool. After creating a cluster, you can change the size of its node pool with `az aks scale`.
  - name: --zones -z
    type: string array
    short-summary: Availability zones where agent nodes will be placed.
  - name: --node-osdisk-size
    type: int
    short-summary: Size in GB of the OS disk for each node in the node pool. Minimum 30 GB.
  - name: --node-osdisk-type
    type: string
    short-summary: "OS disk type to be used for machines in a given agent pool: Ephemeral or Managed. Defaults to 'Ephemeral' when possible in conjunction with VM size and OS disk size. May not be changed for this pool after creation."
  - name: --kubernetes-version -k
    type: string
    short-summary: Version of Kubernetes to use for creating the cluster, such as "1.16.9".
    populator-commands:
      - "`az aks get-versions`"
  - name: --ssh-key-value
    type: string
    short-summary: Public key path or key contents to install on node VMs for SSH access. For example, 'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'.
  - name: --admin-username -u
    type: string
    short-summary: User account to create on node VMs for SSH access.
  - name: --windows-admin-username
    type: string
    short-summary: Username to create on Windows node VMs.
  - name: --windows-admin-password
    type: string
    short-summary: Password to create on Windows node VMs.
  - name: --enable-ahub
    type: bool
    short-summary: Enable Azure Hybrid User Benefits (AHUB) for Windows VMs.
  - name: --enable-aad
    type: bool
    short-summary: Enable managed AAD feature for cluster.
  - name: --aad-admin-group-object-ids
    type: string
    short-summary: Comma separated list of aad group object IDs that will be set as cluster admin.
  - name: --aad-client-app-id
    type: string
    short-summary: The ID of an Azure Active Directory client application of type "Native". This application is for user login via kubectl.
  - name: --aad-server-app-id
    type: string
    short-summary: The ID of an Azure Active Directory server application of type "Web app/API". This application represents the managed cluster's apiserver (Server application).
  - name: --aad-server-app-secret
    type: string
    short-summary: The secret of an Azure Active Directory server application.
  - name: --aad-tenant-id
    type: string
    short-summary: The ID of an Azure Active Directory tenant.
  - name: --dns-service-ip
    type: string
    short-summary: An IP address assigned to the Kubernetes DNS service.
    long-summary: This address must be within the Kubernetes service address range specified by "--service-cidr". For example, 10.0.0.10.
  - name: --docker-bridge-address
    type: string
    short-summary: A specific IP address and netmask for the Docker bridge, using standard CIDR notation.
    long-summary: This address must not be in any Subnet IP ranges, or the Kubernetes service address range. For example, 172.17.0.1/16.
  - name: --load-balancer-sku
    type: string
    short-summary: Azure Load Balancer SKU selection for your cluster. basic or standard.
    long-summary: Select between Basic or Standard Azure Load Balancer SKU for your AKS cluster.
  - name: --load-balancer-managed-outbound-ip-count
    type: int
    short-summary: Load balancer managed outbound IP count.
    long-summary: Desired number of managed outbound IPs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only.
  - name: --load-balancer-outbound-ips
    type: string
    short-summary: Load balancer outbound IP resource IDs.
    long-summary: Comma-separated public IP resource IDs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only.
  - name: --load-balancer-outbound-ip-prefixes
    type: string
    short-summary: Load balancer outbound IP prefix resource IDs.
    long-summary: Comma-separated public IP prefix resource IDs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only.
  - name: --load-balancer-outbound-ports
    type: int
    short-summary: Load balancer outbound allocated ports.
    long-summary: Desired static number of outbound ports per VM in the load balancer backend pool. By default, set to 0 which uses the default allocation based on the number of VMs.
  - name: --load-balancer-idle-timeout
    type: int
    short-summary: Load balancer idle timeout in minutes.
    long-summary: Desired idle timeout for load balancer outbound flows, default is 30 minutes. Please specify a value in the range of [4, 100].
  - name: --outbound-type
    type: string
    short-summary: How outbound traffic will be configured for a cluster.
    long-summary: Select between loadBalancer and userDefinedRouting. If not set, defaults to type loadBalancer. Requires --vnet-subnet-id to be provided with a preconfigured route table and --load-balancer-sku to be Standard.
  - name: --enable-cluster-autoscaler
    type: bool
    short-summary: Enable cluster autoscaler, default value is false.
    long-summary: If specified, please make sure the kubernetes version is larger than 1.10.6.
  - name: --min-count
    type: int
    short-summary: Minimum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100].
  - name: --max-count
    type: int
    short-summary: Maximum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100].
  - name: --vm-set-type
    type: string
    short-summary: Agent pool vm set type. VirtualMachineScaleSets or AvailabilitySet.
  - name: --enable-addons -a
    type: string
    short-summary: Enable the Kubernetes addons in a comma-separated list.
    long-summary: |-
        These addons are available:
            http_application_routing - configure ingress with automatic public DNS name creation.
            monitoring - turn on Log Analytics monitoring. Uses the Log Analytics Default Workspace if it exists, else creates one.
                Specify "--workspace-resource-id" to use an existing workspace.
                If monitoring addon is enabled --no-wait argument will have no effect
            azure-policy - enable Azure policy. The Azure Policy add-on for AKS enables at-scale enforcements and safeguards on your clusters in a centralized, consistent manner.
                Learn more at aka.ms/aks/policy.
            virtual-node - enable AKS Virtual Node.
                Requires --aci-subnet-name to provide the name of an existing subnet for the Virtual Node to use.
                aci-subnet-name must be in the same vnet which is specified by --vnet-subnet-id (required as well).
            confcom - enable confcom addon, this will enable SGX device plugin by default.
  - name: --disable-rbac
    type: bool
    short-summary: Disable Kubernetes Role-Based Access Control.
  - name: --enable-rbac -r
    type: bool
    short-summary: "Enable Kubernetes Role-Based Access Control. Default: enabled."
  - name: --max-pods -m
    type: int
    short-summary: The maximum number of pods deployable to a node.
    long-summary: If not specified, defaults to 110, or 30 for advanced networking configurations.
  - name: --network-plugin
    type: string
    short-summary: The Kubernetes network plugin to use.
    long-summary: Specify "azure" for advanced networking configurations. Defaults to "kubenet".
  - name: --network-policy
    type: string
    short-summary: The Kubernetes network policy to use.
    long-summary: |
        Using together with "azure" network plugin.
        Specify "azure" for Azure network policy manager and "calico" for calico network policy controller.
        Defaults to "" (network policy disabled).
  - name: --no-ssh-key -x
    type: string
    short-summary: Do not use or create a local SSH key.
    long-summary: To access nodes after creating a cluster with this option, use the Azure Portal.
  - name: --pod-cidr
    type: string
    short-summary: A CIDR notation IP range from which to assign pod IPs when kubenet is used.
    long-summary: This range must not overlap with any Subnet IP ranges. For example, 172.244.0.0/16.
  - name: --service-cidr
    type: string
    short-summary: A CIDR notation IP range from which to assign service cluster IPs.
    long-summary: This range must not overlap with any Subnet IP ranges. For example, 10.0.0.0/16.
  - name: --vnet-subnet-id
    type: string
    short-summary: The ID of a subnet in an existing VNet into which to deploy the cluster.
  - name: --ppg
    type: string
    short-summary: The ID of a PPG.
  - name: --enable-node-public-ip
    type: bool
    short-summary: Enable VMSS node public IP.
  - name: --workspace-resource-id
    type: string
    short-summary: The resource ID of an existing Log Analytics Workspace to use for storing monitoring data. If not specified, uses the default Log Analytics Workspace if it exists, otherwise creates one.
  - name: --uptime-sla
    type: bool
    short-summary: Enable a paid managed cluster service with a financially backed SLA.
  - name: --attach-acr
    type: string
    short-summary: Grant the 'acrpull' role assignment to the ACR specified by name or resource ID.
  - name: --enable-private-cluster
    type: string
    short-summary: Enable private cluster.
  - name: --api-server-authorized-ip-ranges
    type: string
    short-summary: Comma separated list of authorized apiserver IP ranges. Set to 0.0.0.0/32 to restrict apiserver traffic to node pools.
  - name: --enable-managed-identity
    type: bool
    short-summary: Using a system assigned managed identity to manage cluster resource group.
  - name: --assign-identity
    type: string
    short-summary: Specify an existing user assigned identity for control plane's usage in order to manage cluster resource group.
  - name: --node-osdisk-diskencryptionset-id -d
    type: string
    short-summary: ResourceId of the disk encryption set to use for enabling encryption at rest on agent node os disk.
  - name: --aci-subnet-name
    type: string
    short-summary: The name of a subnet in an existing VNet into which to deploy the virtual nodes.
  - name: --appgw-name
    type: string
    short-summary: Name of the application gateway to create/use in the node resource group. Use with ingress-azure addon.
  - name: --appgw-subnet-cidr
    type: string
    short-summary: Subnet CIDR to use for a new subnet created to deploy the Application Gateway. Use with ingress-azure addon.
  - name: --appgw-id
    type: string
    short-summary: Resource Id of an existing Application Gateway to use with AGIC. Use with ingress-azure addon.
  - name: --appgw-subnet-id
    type: string
    short-summary: Resource Id of an existing Subnet used to deploy the Application Gateway. Use with ingress-azure addon.
  - name: --appgw-watch-namespace
    type: string
    short-summary: Specify the namespace, which AGIC should watch. This could be a single string value, or a comma-separated list of namespaces.
  - name: --enable-sgxquotehelper
    type: bool
    short-summary: Enable SGX quote helper for confcom addon.
examples:
  - name: Create a Kubernetes cluster with an existing SSH public key.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --ssh-key-value /path/to/publickey
  - name: Create a Kubernetes cluster with a specific version.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --kubernetes-version 1.16.9
  - name: Create a Kubernetes cluster with a larger node pool.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --node-count 7
  - name: Create a kubernetes cluster with k8s 1.13.9 but use vmas.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --kubernetes-version 1.16.9 --vm-set-type AvailabilitySet
  - name: Create a kubernetes cluster with default kubernetes version, default SKU load balancer (Standard) and default vm set type (VirtualMachineScaleSets).
    text: az aks create -g MyResourceGroup -n MyManagedCluster
  - name: Create a kubernetes cluster with standard SKU load balancer and two AKS created IPs for the load balancer outbound connection usage.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-managed-outbound-ip-count 2
  - name: Create a kubernetes cluster with a standard SKU load balancer, with two outbound AKS managed IPs an idle flow timeout of 5 minutes and 8000 allocated ports per machine
    text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-managed-outbound-ip-count 2 --load-balancer-idle-timeout 5 --load-balancer-outbound-ports 8000
  - name: Create a kubernetes cluster with standard SKU load balancer and use the provided public IPs for the load balancer outbound connection usage.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-outbound-ips <ip-resource-id-1,ip-resource-id-2>
  - name: Create a kubernetes cluster with standard SKU load balancer and use the provided public IP prefixes for the load balancer outbound connection usage.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-outbound-ip-prefixes <ip-prefix-resource-id-1,ip-prefix-resource-id-2>
  - name: Create a kubernetes cluster with basic SKU load balancer and AvailabilitySet vm set type.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-sku basic --vm-set-type AvailabilitySet
  - name: Create a kubernetes cluster with authorized apiserver IP ranges.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --api-server-authorized-ip-ranges 193.168.1.0/24,194.168.1.0/24,195.168.1.0
  - name: Create a kubernetes cluster which enables managed identity.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-managed-identity
  - name: Create a kubernetes cluster with userDefinedRouting, standard load balancer SKU and a custom subnet preconfigured with a route table
    text: az aks create -g MyResourceGroup -n MyManagedCluster --outbound-type userDefinedRouting --load-balancer-sku standard --vnet-subnet-id customUserSubnetVnetID
  - name: Create a kubernetes cluster with supporting Windows agent pools.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-sku Standard --network-plugin azure --windows-admin-username azure --windows-admin-password 'replacePassword1234$'
  - name: Create a kubernetes cluster with supporting Windows agent pools with AHUB enabled.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --load-balancer-sku Standard --network-plugin azure --windows-admin-username azure --windows-admin-password 'replacePassword1234$' --enable-ahub
  - name: Create a kubernetes cluster with managed AAD enabled.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --enable-aad --aad-admin-group-object-ids <id-1,id-2> --aad-tenant-id <id>
  - name: Create a kubernetes cluster with server side encryption using your owned key.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --node-osdisk-diskencryptionset-id <disk-encryption-set-resource-id>
  - name: Create a kubernetes cluster with ephemeral OS enabled.
    text: az aks create -g MyResourceGroup -n MyManagedCluster --node-osdisk-type Ephemeral --node-osdisk-size 48
"""
# Help entry for `az aks update`.
# Fixes: user-facing typos "Comma seperated" -> "Comma separated" (two parameter
# summaries) and "featture" -> "feature" (two example names).
helps['aks update'] = """
type: command
short-summary: Update a managed Kubernetes cluster.
parameters:
  - name: --enable-cluster-autoscaler -e
    type: bool
    short-summary: Enable cluster autoscaler.
  - name: --disable-cluster-autoscaler -d
    type: bool
    short-summary: Disable cluster autoscaler.
  - name: --update-cluster-autoscaler -u
    type: bool
    short-summary: Update min-count or max-count for cluster autoscaler.
  - name: --min-count
    type: int
    short-summary: Minimum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
  - name: --max-count
    type: int
    short-summary: Maximum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
  - name: --uptime-sla
    type: bool
    short-summary: Enable a paid managed cluster service with a financially backed SLA.
  - name: --load-balancer-managed-outbound-ip-count
    type: int
    short-summary: Load balancer managed outbound IP count.
    long-summary: Desired number of managed outbound IPs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only. If updated, it will wipe off the existing setting on Load balancer managed outbound IP count; Load balancer outbound IP resource IDs and Load balancer outbound IP prefix resource IDs.
  - name: --load-balancer-outbound-ips
    type: string
    short-summary: Load balancer outbound IP resource IDs.
    long-summary: Comma-separated public IP resource IDs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only. If updated, it will wipe off the existing setting on Load balancer managed outbound IP count; Load balancer outbound IP resource IDs and Load balancer outbound IP prefix resource IDs.
  - name: --load-balancer-outbound-ip-prefixes
    type: string
    short-summary: Load balancer outbound IP prefix resource IDs.
    long-summary: Comma-separated public IP prefix resource IDs for load balancer outbound connection. Valid for Standard SKU load balancer cluster only. If updated, it will wipe off the existing setting on Load balancer managed outbound IP count; Load balancer outbound IP resource IDs and Load balancer outbound IP prefix resource IDs.
  - name: --load-balancer-outbound-ports
    type: int
    short-summary: Load balancer outbound allocated ports.
    long-summary: Desired static number of outbound ports per VM in the load balancer backend pool. By default, set to 0 which uses the default allocation based on the number of VMs.
  - name: --load-balancer-idle-timeout
    type: int
    short-summary: Load balancer idle timeout in minutes.
    long-summary: Desired idle timeout for load balancer outbound flows, default is 30 minutes. Please specify a value in the range of [4, 100].
  - name: --attach-acr
    type: string
    short-summary: Grant the 'acrpull' role assignment to the ACR specified by name or resource ID.
  - name: --detach-acr
    type: string
    short-summary: Disable the 'acrpull' role assignment to the ACR specified by name or resource ID.
  - name: --api-server-authorized-ip-ranges
    type: string
    short-summary: Comma separated list of authorized apiserver IP ranges. Set to "" to allow all traffic on a previously restricted cluster. Set to 0.0.0.0/32 to restrict apiserver traffic to node pools.
  - name: --enable-aad
    type: bool
    short-summary: Enable managed AAD feature for cluster.
  - name: --aad-admin-group-object-ids
    type: string
    short-summary: Comma separated list of aad group object IDs that will be set as cluster admin.
  - name: --aad-tenant-id
    type: string
    short-summary: The ID of an Azure Active Directory tenant.
  - name: --enable-ahub
    type: bool
    short-summary: Enable Azure Hybrid User Benefits (AHUB) feature for cluster.
  - name: --disable-ahub
    type: bool
    short-summary: Disable Azure Hybrid User Benefits (AHUB) feature for cluster.
examples:
  - name: Update a kubernetes cluster with standard SKU load balancer to use two AKS created IPs for the load balancer outbound connection usage.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --load-balancer-managed-outbound-ip-count 2
  - name: Update a kubernetes cluster with standard SKU load balancer to use the provided public IPs for the load balancer outbound connection usage.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --load-balancer-outbound-ips <ip-resource-id-1,ip-resource-id-2>
  - name: Create a kubernetes cluster with a standard SKU load balancer, with two outbound AKS managed IPs an idle flow timeout of 5 minutes and 8000 allocated ports per machine
    text: az aks update -g MyResourceGroup -n MyManagedCluster --load-balancer-managed-outbound-ip-count 2 --load-balancer-idle-timeout 5 --load-balancer-outbound-ports 8000
  - name: Update a kubernetes cluster with standard SKU load balancer to use the provided public IP prefixes for the load balancer outbound connection usage.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --load-balancer-outbound-ip-prefixes <ip-prefix-resource-id-1,ip-prefix-resource-id-2>
  - name: Attach AKS cluster to ACR by name "acrName"
    text: az aks update -g MyResourceGroup -n MyManagedCluster --attach-acr acrName
  - name: Update a kubernetes cluster with authorized apiserver ip ranges.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --api-server-authorized-ip-ranges 193.168.1.0/24,194.168.1.0/24
  - name: Disable authorized apiserver ip ranges feature for a kubernetes cluster.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --api-server-authorized-ip-ranges ""
  - name: Restrict apiserver traffic in a kubernetes cluster to agentpool nodes.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --api-server-authorized-ip-ranges 0.0.0.0/32
  - name: Update a AKS-managed AAD cluster with tenant ID or admin group object IDs.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --aad-admin-group-object-ids <id-1,id-2> --aad-tenant-id <id>
  - name: Migrate a AKS AAD-Integrated cluster or a non-AAD cluster to a AKS-managed AAD cluster.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-aad --aad-admin-group-object-ids <id-1,id-2> --aad-tenant-id <id>
  - name: Enable Azure Hybrid User Benefits feature for a kubernetes cluster.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --enable-ahub
  - name: Disable Azure Hybrid User Benefits feature for a kubernetes cluster.
    text: az aks update -g MyResourceGroup -n MyManagedCluster --disable-ahub
"""
helps['aks delete'] = """
type: command
short-summary: Delete a managed Kubernetes cluster.
examples:
- name: Delete a managed Kubernetes cluster. (autogenerated)
text: az aks delete --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks disable-addons'] = """
type: command
short-summary: Disable Kubernetes addons.
parameters:
- name: --addons -a
type: string
short-summary: Disable the Kubernetes addons in a comma-separated list.
examples:
- name: Disable Kubernetes addons. (autogenerated)
text: az aks disable-addons --addons virtual-node --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks enable-addons'] = """
type: command
short-summary: Enable Kubernetes addons.
long-summary: |-
These addons are available:
http_application_routing - configure ingress with automatic public DNS name creation.
monitoring - turn on Log Analytics monitoring. Requires "--workspace-resource-id".
If monitoring addon is enabled --no-wait argument will have no effect
virtual-node - enable AKS Virtual Node. Requires --subnet-name to provide the name of an existing subnet for the Virtual Node to use.
azure-policy - enable Azure policy. The Azure Policy add-on for AKS enables at-scale enforcements and safeguards on your clusters in a centralized, consistent manner.
Learn more at aka.ms/aks/policy.
ingress-appgw - enable Application Gateway Ingress Controller addon.
parameters:
- name: --addons -a
type: string
short-summary: Enable the Kubernetes addons in a comma-separated list.
- name: --workspace-resource-id
type: string
short-summary: The resource ID of an existing Log Analytics Workspace to use for storing monitoring data.
- name: --appgw-name
type: string
short-summary: Name of the application gateway to create/use in the node resource group. Use with ingress-azure addon.
- name: --appgw-subnet-cidr
type: string
short-summary: Subnet CIDR to use for a new subnet created to deploy the Application Gateway. Use with ingress-azure addon.
- name: --appgw-id
type: string
short-summary: Resource Id of an existing Application Gateway to use with AGIC. Use with ingress-azure addon.
- name: --appgw-subnet-id
type: string
short-summary: Resource Id of an existing Subnet used to deploy the Application Gateway. Use with ingress-azure addon.
- name: --appgw-watch-namespace
type: string
short-summary: Specify the namespace, which AGIC should watch. This could be a single string value, or a comma-separated list of namespaces.
- name: --enable-sgxquotehelper
type: bool
short-summary: Enable SGX quote helper for confcom addon.
examples:
- name: Enable Kubernetes addons. (autogenerated)
text: az aks enable-addons --addons virtual-node --name MyManagedCluster --resource-group MyResourceGroup --subnet MySubnetName
crafted: true
- name: Enable ingress-appgw addon with subnet prefix.
text: az aks enable-addons --name MyManagedCluster --resource-group MyResourceGroup --addons ingress-appgw --appgw-subnet-cidr 10.2.0.0/16 --appgw-name gateway
crafted: true
"""
helps['aks get-credentials'] = """
type: command
short-summary: Get access credentials for a managed Kubernetes cluster.
long-summary: By default, the credentials are merged into the .kube/config file so kubectl can use them. See -f parameter for details.
parameters:
- name: --admin -a
type: bool
short-summary: "Get cluster administrator credentials. Default: cluster user credentials."
long-summary: "On clusters with Azure Active Directory integration, this bypasses normal Azure AD authentication and can be used if you're permanently blocked by not having access to a valid Azure AD group with access to your cluster. Requires 'Azure Kubernetes Service Cluster Admin' role."
- name: --file -f
type: string
short-summary: Kubernetes configuration file to update. Use "-" to print YAML to stdout instead.
- name: --overwrite-existing
type: bool
short-summary: Overwrite any existing cluster entry with the same name.
- name: --output -o
type: string
long-summary: Credentials are always in YAML format, so this argument is effectively ignored.
examples:
- name: Get access credentials for a managed Kubernetes cluster. (autogenerated)
text: az aks get-credentials --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks get-upgrades'] = """
type: command
short-summary: Get the upgrade versions available for a managed Kubernetes cluster.
examples:
- name: Get the upgrade versions available for a managed Kubernetes cluster
text: az aks get-upgrades --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks get-versions'] = """
type: command
short-summary: Get the versions available for creating a managed Kubernetes cluster.
examples:
- name: Get the versions available for creating a managed Kubernetes cluster
text: az aks get-versions --location westus2
crafted: true
"""
helps['aks install-cli'] = """
type: command
short-summary: Download and install kubectl, the Kubernetes command-line tool. Download and install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
helps['aks list'] = """
type: command
short-summary: List managed Kubernetes clusters.
"""
helps['aks nodepool'] = """
type: group
    short-summary: Commands to manage node pools in a managed Kubernetes cluster.
"""
helps['aks nodepool add'] = """
type: command
short-summary: Add a node pool to the managed Kubernetes cluster.
parameters:
- name: --node-vm-size -s
type: string
short-summary: Size of Virtual Machines to create as Kubernetes nodes.
- name: --node-count -c
type: int
short-summary: Number of nodes in the Kubernetes agent pool. After creating a cluster, you can change the size of its node pool with `az aks scale`.
- name: --kubernetes-version -k
type: string
short-summary: Version of Kubernetes to use for creating the cluster, such as "1.16.9".
populator-commands:
- "`az aks get-versions`"
- name: --node-osdisk-size
type: int
short-summary: Size in GB of the OS disk for each node in the agent pool. Minimum 30 GB.
- name: --node-osdisk-type
type: string
short-summary: OS disk type to be used for machines in a given agent pool. Defaults to 'Ephemeral' when possible in conjunction with VM size and OS disk size. May not be changed for this pool after creation.
- name: --max-pods -m
type: int
short-summary: The maximum number of pods deployable to a node.
long-summary: If not specified, defaults to 110, or 30 for advanced networking configurations.
- name: --zones -z
type: string array
short-summary: Availability zones where agent nodes will be placed.
- name: --enable-node-public-ip
type: bool
short-summary: Enable VMSS node public IP.
- name: --vnet-subnet-id
type: string
short-summary: The ID of a subnet in an existing VNet into which to deploy the cluster.
- name: --ppg
type: string
short-summary: The ID of a PPG.
- name: --os-type
type: string
short-summary: The OS Type. Linux or Windows.
- name: --enable-cluster-autoscaler -e
type: bool
short-summary: Enable cluster autoscaler.
- name: --min-count
type: int
short-summary: Minimum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --max-count
type: int
short-summary: Maximum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --node-taints
type: string
short-summary: The node taints for the node pool. You can't change the node taints through CLI after the node pool is created.
- name: --labels
type: string
short-summary: The node labels for the node pool. You can't change the node labels through CLI after the node pool is created. See https://aka.ms/node-labels for syntax of labels.
- name: --mode
type: string
short-summary: The mode for a node pool which defines a node pool's primary function. If set as "System", AKS prefers system pods scheduling to node pools with mode `System`. Learn more at https://aka.ms/aks/nodepool/mode.
- name: --priority
type: string
short-summary: The priority of the node pool.
- name: --eviction-policy
type: string
short-summary: The eviction policy of the Spot node pool. It can only be set when --priority is Spot.
- name: --spot-max-price
type: float
short-summary: It can only be set when --priority is Spot. Specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand. It can only include up to 5 decimal places.
- name: --max-surge
type: string
short-summary: Extra nodes used to speed upgrade. When specified, it represents the number or percent used, eg. 5 or 33%
examples:
- name: Create a nodepool in an existing AKS cluster with ephemeral os enabled.
text: az aks nodepool add -g MyResourceGroup -n nodepool1 --cluster-name MyManagedCluster --node-osdisk-type Ephemeral --node-osdisk-size 48
"""
helps['aks nodepool delete'] = """
type: command
short-summary: Delete the agent pool in the managed Kubernetes cluster.
"""
helps['aks nodepool get-upgrades'] = """
type: command
short-summary: Get the available upgrade versions for an agent pool of the managed Kubernetes cluster.
examples:
- name: Get the available upgrade versions for an agent pool of the managed Kubernetes cluster.
text: az aks nodepool get-upgrades --resource-group MyResourceGroup --cluster-name MyManagedCluster --nodepool-name MyNodePool
crafted: true
parameters:
- name: --nodepool-name
type: string
short-summary: name of the node pool.
"""
helps['aks nodepool list'] = """
type: command
short-summary: List node pools in the managed Kubernetes cluster.
"""
helps['aks nodepool scale'] = """
type: command
short-summary: Scale the node pool in a managed Kubernetes cluster.
parameters:
- name: --node-count -c
type: int
short-summary: Number of nodes in the Kubernetes node pool.
"""
helps['aks nodepool show'] = """
type: command
short-summary: Show the details for a node pool in the managed Kubernetes cluster.
"""
helps['aks nodepool update'] = """
type: command
short-summary: Update a node pool to enable/disable cluster-autoscaler or change min-count or max-count
parameters:
- name: --enable-cluster-autoscaler -e
type: bool
short-summary: Enable cluster autoscaler.
- name: --disable-cluster-autoscaler -d
type: bool
short-summary: Disable cluster autoscaler.
- name: --update-cluster-autoscaler -u
type: bool
short-summary: Update min-count or max-count for cluster autoscaler.
- name: --min-count
type: int
short-summary: Minimum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --max-count
type: int
short-summary: Maximum nodes count used for autoscaler, when "--enable-cluster-autoscaler" specified. Please specify the value in the range of [1, 100]
- name: --mode
type: string
short-summary: The mode for a node pool which defines a node pool's primary function. If set as "System", AKS prefers system pods scheduling to node pools with mode `System`. Learn more at https://aka.ms/aks/nodepool/mode.
- name: --max-surge
type: string
short-summary: Extra nodes used to speed upgrade. When specified, it represents the number or percent used, eg. 5 or 33%
examples:
- name: Enable cluster-autoscaler within node count range [1,5]
text: az aks nodepool update --enable-cluster-autoscaler --min-count 1 --max-count 5 -g MyResourceGroup -n nodepool1 --cluster-name MyManagedCluster
- name: Disable cluster-autoscaler for an existing cluster
text: az aks nodepool update --disable-cluster-autoscaler -g MyResourceGroup -n nodepool1 --cluster-name MyManagedCluster
- name: Update min-count or max-count for cluster autoscaler.
text: az aks nodepool update --update-cluster-autoscaler --min-count 1 --max-count 10 -g MyResourceGroup -n nodepool1 --cluster-name MyManagedCluster
"""
helps['aks nodepool upgrade'] = """
type: command
short-summary: Upgrade the node pool in a managed Kubernetes cluster.
parameters:
- name: --kubernetes-version -k
type: string
short-summary: Version of Kubernetes to upgrade the node pool to, such as "1.16.9".
- name: --node-image-only
type: bool
short-summary: Only upgrade agent pool's node image.
- name: --max-surge
type: string
short-summary: Extra nodes used to speed upgrade. When specified, it represents the number or percent used, eg. 5 or 33%
"""
helps['aks remove-dev-spaces'] = """
type: command
short-summary: Remove Azure Dev Spaces from a managed Kubernetes cluster.
examples:
- name: Remove Azure Dev Spaces from a managed Kubernetes cluster.
text: |-
az aks remove-dev-spaces -g my-aks-group -n my-aks
- name: Remove Azure Dev Spaces from a managed Kubernetes cluster without prompting.
text: |-
az aks remove-dev-spaces -g my-aks-group -n my-aks --yes
"""
helps['aks scale'] = """
type: command
short-summary: Scale the node pool in a managed Kubernetes cluster.
parameters:
- name: --node-count -c
type: int
short-summary: Number of nodes in the Kubernetes node pool.
examples:
- name: Scale the node pool in a managed Kubernetes cluster. (autogenerated)
text: az aks scale --name MyManagedCluster --node-count 3 --resource-group MyResourceGroup
crafted: true
"""
helps['aks show'] = """
type: command
short-summary: Show the details for a managed Kubernetes cluster.
examples:
- name: Show the details for a managed Kubernetes cluster
text: az aks show --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks update-credentials'] = """
type: command
short-summary: Update credentials for a managed Kubernetes cluster, like service principal.
parameters:
- name: --reset-service-principal
type: bool
short-summary: Reset service principal for a managed cluster.
- name: --service-principal
type: string
short-summary: Service principal used for authentication to Azure APIs. This argument is required if `--reset-service-principal` is specified.
- name: --client-secret
type: string
short-summary: Secret associated with the service principal. This argument is required if `--service-principal` is specified.
- name: --reset-aad
type: string
short-summary: Reset Azure Active Directory configuration for a managed cluster.
- name: --aad-server-app-id
type: string
short-summary: The ID of an Azure Active Directory server application. This argument is required if `--reset-aad` is specified.
- name: --aad-server-app-secret
type: string
short-summary: The secret of an Azure Active Directory server application. This argument is required if `--reset-aad` is specified.
- name: --aad-client-app-id
type: string
short-summary: The ID of an Azure Active Directory client application. This argument is required if `--reset-aad` is specified.
- name: --aad-tenant-id
type: string
short-summary: Tenant ID associated with Azure Active Directory.
examples:
- name: Update an existing Kubernetes cluster with new service principal.
text: az aks update-credentials -g MyResourceGroup -n MyManagedCluster --reset-service-principal --service-principal MyNewServicePrincipalID --client-secret MyNewServicePrincipalSecret
- name: Update an existing Azure Active Directory Kubernetes cluster with new server app secret key.
text: az aks update-credentials -g MyResourceGroup -n MyManagedCluster --reset-aad --aad-server-app-id MyExistingAADServerAppID --aad-server-app-secret MyNewAADServerAppSecret --aad-client-app-id MyExistingAADClientAppID --aad-tenant-id MyAADTenantID
"""
helps['aks upgrade'] = """
type: command
short-summary: Upgrade a managed Kubernetes cluster to a newer version.
long-summary: "Kubernetes will be unavailable during cluster upgrades."
parameters:
- name: --kubernetes-version -k
type: string
short-summary: Version of Kubernetes to upgrade the cluster to, such as "1.16.9".
populator-commands:
- "`az aks get-upgrades`"
- name: --control-plane-only
type: bool
short-summary: Upgrade the cluster control plane only. If not specified, both control plane AND all node pools will be upgraded.
- name: --node-image-only
type: bool
short-summary: Only upgrade node image for agent pools.
examples:
- name: Upgrade a managed Kubernetes cluster to a newer version. (autogenerated)
text: az aks upgrade --kubernetes-version 1.12.6 --name MyManagedCluster --resource-group MyResourceGroup
crafted: true
"""
helps['aks use-dev-spaces'] = """
type: command
short-summary: Use Azure Dev Spaces with a managed Kubernetes cluster.
parameters:
- name: --update
type: bool
short-summary: Update to the latest Azure Dev Spaces client components.
- name: --space -s
type: string
short-summary: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
- name: --endpoint -e
type: string
    short-summary: The endpoint type to be used for an Azure Dev Spaces controller. See https://aka.ms/azds-networking for more information.
examples:
- name: Use Azure Dev Spaces with a managed Kubernetes cluster, interactively selecting a dev space.
text: |-
az aks use-dev-spaces -g my-aks-group -n my-aks
- name: Use Azure Dev Spaces with a managed Kubernetes cluster, updating to the latest Azure Dev Spaces \
client components and selecting a new or existing dev space 'my-space'.
text: |-
az aks use-dev-spaces -g my-aks-group -n my-aks --update --space my-space
- name: Use Azure Dev Spaces with a managed Kubernetes cluster, selecting a new or existing dev space \
'develop/my-space' without prompting for confirmation.
text: |-
az aks use-dev-spaces -g my-aks-group -n my-aks -s develop/my-space -y
- name: Use Azure Dev Spaces with a managed Kubernetes cluster with a private endpoint.
text: |-
az aks use-dev-spaces -g my-aks-group -n my-aks -e private
"""
helps['aks wait'] = """
type: command
short-summary: Wait for a managed Kubernetes cluster to reach a desired state.
long-summary: If an operation on a cluster was interrupted or was started with `--no-wait`, use this command to wait for it to complete.
examples:
- name: Wait for a cluster to be upgraded, polling every minute for up to thirty minutes.
text: |-
az aks wait -g MyResourceGroup -n MyManagedCluster --updated --interval 60 --timeout 1800
- name: Wait for a managed Kubernetes cluster to reach a desired state (autogenerated)
text: az aks wait --created --interval 60 --name MyManagedCluster --resource-group MyResourceGroup --timeout 1800
crafted: true
"""
helps['aks rotate-certs'] = """
type: command
short-summary: Rotate certificates and keys on a managed Kubernetes cluster
long-summary: Kubernetes will be unavailable during cluster certificate rotation.
"""
helps['openshift'] = """
type: group
short-summary: Manage Azure Red Hat OpenShift 3.11 clusters.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
"""
helps['openshift create'] = """
type: command
short-summary: Create a new Azure Red Hat OpenShift 3.11 cluster.
long-summary: Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.
parameters:
- name: --compute-vm-size -s
type: string
short-summary: Size of Virtual Machines to create as OpenShift nodes.
- name: --compute-count -c
type: int
short-summary: Number of nodes in the OpenShift node pool.
- name: --aad-client-app-id
type: string
short-summary: The ID of an Azure Active Directory client application. If not specified, a new Azure Active Directory client is created.
- name: --aad-client-app-secret
type: string
short-summary: The secret of an Azure Active Directory client application.
- name: --aad-tenant-id
type: string
short-summary: The ID of an Azure Active Directory tenant.
- name: --vnet-peer
type: string
short-summary: The ID or the name of a subnet in an existing VNet into which to peer the cluster.
- name: --vnet-prefix
type: string
short-summary: The CIDR used on the VNet into which to deploy the cluster.
- name: --subnet-prefix
type: string
short-summary: The CIDR used on the Subnet into which to deploy the cluster.
- name: --customer-admin-group-id
type: string
short-summary: The Object ID of an Azure Active Directory Group that memberships will get synced into the OpenShift group "osa-customer-admins". If not specified, no cluster admin access will be granted.
- name: --workspace-id
type: string
short-summary: The resource id of an existing Log Analytics Workspace to use for storing monitoring data.
examples:
- name: Create an OpenShift cluster and auto create an AAD Client
text: az openshift create -g MyResourceGroup -n MyManagedCluster
- name: Create an OpenShift cluster and auto create an AAD Client and setup cluster admin group
text: az openshift create -g MyResourceGroup -n MyManagedCluster --customer-admin-group-id {GROUP_ID}
- name: Create an OpenShift cluster with 5 compute nodes and a custom AAD Client.
text: az openshift create -g MyResourceGroup -n MyManagedCluster --aad-client-app-id {APP_ID} --aad-client-app-secret {APP_SECRET} --aad-tenant-id {TENANT_ID} --compute-count 5
- name: Create an Openshift cluster using a custom vnet
text: az openshift create -g MyResourceGroup -n MyManagedCluster --vnet-peer "/subscriptions/0000000-0000-0000-0000-000000000000/resourceGroups/openshift-vnet/providers/Microsoft.Network/virtualNetworks/test"
- name: Create an Openshift cluster with Log Analytics monitoring enabled
text: az openshift create -g MyResourceGroup -n MyManagedCluster --workspace-id "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyResourceGroup/providers/Microsoft.OperationalInsights/workspaces/{workspace-id}"
"""
helps['openshift delete'] = """
type: command
short-summary: Delete an Azure Red Hat OpenShift 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Delete an Azure Red Hat OpenShift 3.11 cluster.
text: az openshift delete --name MyManagedOpenShiftCluster --resource-group MyResourceGroup
crafted: true
"""
helps['openshift list'] = """
type: command
short-summary: List Azure Red Hat OpenShift 3.11 clusters.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
"""
helps['openshift scale'] = """
type: command
short-summary: Scale the compute pool in an Azure Red Hat OpenShift 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
parameters:
- name: --compute-count -c
type: int
short-summary: Number of nodes in the OpenShift compute pool.
examples:
- name: Scale the compute pool in an Azure Red Hat OpenShift 3.11 cluster.
text: az openshift scale --compute-count 5 --name MyManagedOpenShiftCluster --resource-group MyResourceGroup
crafted: true
"""
helps['openshift show'] = """
type: command
short-summary: Show the details for an Azure Red Hat OpenShift 3.11 cluster.
long-summary: Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Show the details for an Azure Red Hat OpenShift 3.11 cluster.
text: az openshift show --name MyManagedOpenShiftCluster --resource-group MyResourceGroup
crafted: true
"""
helps['openshift wait'] = """
type: command
short-summary: Wait for an Azure Red Hat OpenShift 3.11 cluster to reach a desired state.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Wait for a cluster to be upgraded, polling every minute for up to thirty minutes.
text: |-
az openshift wait -g MyResourceGroup -n MyManagedCluster --updated --interval 60 --timeout 1800
"""
helps['openshift monitor'] = """
type: group
short-summary: Commands to manage Log Analytics monitoring in an ARO 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
"""
helps['openshift monitor enable'] = """
type: command
short-summary: Enable Log Analytics monitoring in an ARO 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Enable Log Analytics monitoring.
text: |-
az openshift monitor enable -g MyResourceGroup -n MyManagedCluster --workspace-id "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/MyResourceGroup/providers/Microsoft.OperationalInsights/workspaces/{workspace-id}"
"""
helps['openshift monitor disable'] = """
type: command
short-summary: Disable Log Analytics monitoring in an ARO 3.11 cluster.
long-summary: The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.
examples:
- name: Disable Log Analytics monitoring.
text: |-
az openshift monitor disable -g MyResourceGroup -n MyManagedCluster
"""
| true | true |
1c39b63189d497454d0be46622f33bef68588c43 | 4,451 | py | Python | nipyapi/nifi/models/processor_status_entity.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | null | null | null | nipyapi/nifi/models/processor_status_entity.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | 1 | 2018-11-13T21:01:33.000Z | 2018-11-13T21:01:33.000Z | nipyapi/nifi/models/processor_status_entity.py | Paul-Verardi/nipyapi | 7a709611d9cf30e4ce8943db4d4dd617f2f7c81c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.7.1
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ProcessorStatusEntity(object):
    """Swagger-generated model wrapping a processor status payload.

    NOTE: This class is auto generated by the swagger code generator
    program. Do not edit the class manually.
    """

    # Attribute name -> declared Swagger type.
    swagger_types = {
        'processor_status': 'ProcessorStatusDTO',
        'can_read': 'bool'
    }

    # Attribute name -> JSON key used in the REST API definition.
    attribute_map = {
        'processor_status': 'processorStatus',
        'can_read': 'canRead'
    }

    def __init__(self, processor_status=None, can_read=None):
        """Build a ProcessorStatusEntity, optionally seeding its fields."""
        self._processor_status = None
        self._can_read = None
        if processor_status is not None:
            self.processor_status = processor_status
        if can_read is not None:
            self.can_read = can_read

    @property
    def processor_status(self):
        """The processor_status of this ProcessorStatusEntity.

        :rtype: ProcessorStatusDTO
        """
        return self._processor_status

    @processor_status.setter
    def processor_status(self, processor_status):
        """Set the processor_status of this ProcessorStatusEntity.

        :type: ProcessorStatusDTO
        """
        self._processor_status = processor_status

    @property
    def can_read(self):
        """Whether the user can read the underlying resource.

        :rtype: bool
        """
        return self._can_read

    @can_read.setter
    def can_read(self, can_read):
        """Set the can_read flag of this ProcessorStatusEntity.

        :type: bool
        """
        self._can_read = can_read

    def to_dict(self):
        """Return the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including when they appear inside lists or as
        dict values.
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Entities are equal when their attribute dicts match."""
        if not isinstance(other, ProcessorStatusEntity):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 29.282895 | 479 | 0.577174 |
from pprint import pformat
from six import iteritems
import re
class ProcessorStatusEntity(object):
    """Swagger-generated model carrying a processor status payload.

    NOTE: Auto-generated model class; do not edit manually.
    """

    # Attribute name -> declared Swagger type.
    swagger_types = {
        'processor_status': 'ProcessorStatusDTO',
        'can_read': 'bool'
    }

    # Attribute name -> JSON key in the REST API definition.
    attribute_map = {
        'processor_status': 'processorStatus',
        'can_read': 'canRead'
    }

    def __init__(self, processor_status=None, can_read=None):
        """Initialize the entity, optionally seeding both fields."""
        self._processor_status = None
        self._can_read = None
        if processor_status is not None:
            self.processor_status = processor_status
        if can_read is not None:
            self.can_read = can_read

    @property
    def processor_status(self):
        """Get the processor_status of this ProcessorStatusEntity.

        :rtype: ProcessorStatusDTO
        """
        return self._processor_status

    @processor_status.setter
    def processor_status(self, processor_status):
        """Set the processor_status of this ProcessorStatusEntity."""
        self._processor_status = processor_status

    @property
    def can_read(self):
        """Whether the user can read the underlying resource.

        :rtype: bool
        """
        return self._can_read

    @can_read.setter
    def can_read(self, can_read):
        """Set the can_read flag of this ProcessorStatusEntity."""
        self._can_read = can_read

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize any nested models contained in the list.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize any nested models appearing as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal (same type, same attributes)."""
        if not isinstance(other, ProcessorStatusEntity):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| true | true |
1c39b8723e21178c7dc9e88cd80aab540aecd383 | 13,923 | py | Python | sinergym/utils/callbacks.py | AlejandroCN7/sinergym | 4e89e478b5c939323e7ddf6a6ecf25a9a13251c6 | [
"MIT"
] | null | null | null | sinergym/utils/callbacks.py | AlejandroCN7/sinergym | 4e89e478b5c939323e7ddf6a6ecf25a9a13251c6 | [
"MIT"
] | null | null | null | sinergym/utils/callbacks.py | AlejandroCN7/sinergym | 4e89e478b5c939323e7ddf6a6ecf25a9a13251c6 | [
"MIT"
] | null | null | null | """Custom Callbacks for stable baselines 3 algorithms."""
import os
from typing import Optional, Union
import gym
import numpy as np
from stable_baselines3.common.callbacks import BaseCallback, EvalCallback
from stable_baselines3.common.env_util import is_wrapped
from stable_baselines3.common.vec_env import VecEnv, sync_envs_normalization
from sinergym.utils.evaluation import evaluate_policy
from sinergym.utils.wrappers import LoggerWrapper, NormalizeObservation
class LoggerCallback(BaseCallback):
    """Custom callback for plotting additional values in tensorboard.

    :param ep_rewards: Here will be stored all rewards during episode.
    :param ep_powers: Here will be stored all consumption data during episode.
    :param ep_term_comfort: Here will be stored all comfort terms (reward component) during episode.
    :param ep_term_energy: Here will be stored all energy terms (reward component) during episode.
    :param num_comfort_violation: Number of timesteps in which comfort has been violated.
    :param ep_timesteps: Each timestep during an episode, this value increment 1.
    """

    def __init__(self, sinergym_logger=False, verbose=0):
        """Custom callback for plotting additional values in tensorboard.

        Args:
            sinergym_logger (boolean): Indicate if CSVLogger inner Sinergym will be activated or not.
            verbose (int): Verbosity level, forwarded to BaseCallback.
        """
        super(LoggerCallback, self).__init__(verbose)
        self.sinergym_logger = sinergym_logger
        self._reset_episode_accumulators()

    def _reset_episode_accumulators(self):
        """Clear all per-episode accumulators (called on init and at episode end)."""
        self.ep_rewards = []
        self.ep_powers = []
        self.ep_term_comfort = []
        self.ep_term_energy = []
        self.num_comfort_violation = 0
        self.ep_timesteps = 0

    def _last_local(self, keys, error_message=None):
        """Return ``self.locals[key][-1]`` for the first key present in ``keys``.

        Different SB3 algorithms expose the same quantity under different
        names in ``self.locals`` (e.g. 'rewards' vs 'reward'), so candidate
        keys are probed in order.

        Args:
            keys (tuple): Candidate keys, probed in order.
            error_message (str): Message for the KeyError raised when no
                candidate key is present.

        Raises:
            KeyError: If none of the candidate keys is present.
        """
        for key in keys:
            if key in self.locals:
                return self.locals[key][-1]
        raise KeyError(error_message)

    def _on_training_start(self):
        # Activate or deactivate the CSVLogger inside Sinergym, if wrapped.
        if is_wrapped(self.training_env, LoggerWrapper):
            if self.sinergym_logger:
                self.training_env.env_method('activate_logger')
            else:
                self.training_env.env_method('deactivate_logger')

        # record method depending on the type of algorithm:
        # on-policy logs raw values, off-policy logs running means.
        if 'OnPolicyAlgorithm' in self.globals.keys():
            self.record = self.logger.record
        elif 'OffPolicyAlgorithm' in self.globals.keys():
            self.record = self.logger.record_mean
        else:
            raise KeyError(
                'Algorithm type unknown: neither OnPolicyAlgorithm nor '
                'OffPolicyAlgorithm found in globals.')

    def _on_step(self) -> bool:
        """Log observations, actions and episode metrics for the last step.

        Returns:
            bool: Always True, so training continues.
        """
        info = self.locals['infos'][-1]

        # OBSERVATION
        variables = self.training_env.get_attr('variables')[0]['observation']
        # log normalized and original values
        if self.training_env.env_is_wrapped(
                wrapper_class=NormalizeObservation)[0]:
            obs_normalized = self.locals['new_obs'][-1]
            obs = self.training_env.env_method('get_unwrapped_obs')[-1]
            for i, variable in enumerate(variables):
                self.record(
                    'normalized_observation/' + variable, obs_normalized[i])
                self.record(
                    'observation/' + variable, obs[i])
        # Only original values
        else:
            obs = self.locals['new_obs'][-1]
            for i, variable in enumerate(variables):
                self.record(
                    'observation/' + variable, obs[i])

        # ACTION
        variables = self.training_env.get_attr('variables')[0]['action']
        # sinergym action received inner its own setpoints range
        action_ = info['action_']
        # network output clipped with gym action space; the key used in
        # `locals` depends on the algorithm implementation.
        action = self._last_local(
            ('clipped_actions', 'action', 'actions'),
            'Algorithm action key in locals dict unknown.')
        if self.training_env.get_attr('flag_discrete')[0]:
            action = self.training_env.get_attr('action_mapping')[0][action]
        for i, variable in enumerate(variables):
            if action is not None:
                self.record(
                    'action/' + variable, action[i])
            self.record(
                'action_simulation/' + variable, action_[i])

        # Store episode data
        try:
            self.ep_rewards.append(self._last_local(('rewards', 'reward')))
        except KeyError:
            print('Algorithm reward key in locals dict unknown')
        self.ep_powers.append(info['total_power'])
        self.ep_term_comfort.append(info['comfort_penalty'])
        self.ep_term_energy.append(info['total_power_no_units'])
        if info['comfort_penalty'] != 0:
            self.num_comfort_violation += 1
        self.ep_timesteps += 1

        # If episode ends, store summary of episode and reset
        try:
            done = self._last_local(('dones', 'done'))
        except KeyError:
            # Fixed: previously `done` stayed unbound here and the check
            # below raised NameError. Default to "episode not finished".
            done = False
            print('Algorithm done key in locals dict unknown')
        if done:
            # store last episode metrics
            self.episode_metrics = {}
            self.episode_metrics['ep_length'] = self.ep_timesteps
            self.episode_metrics['cumulative_reward'] = np.sum(
                self.ep_rewards)
            self.episode_metrics['mean_reward'] = np.mean(self.ep_rewards)
            self.episode_metrics['mean_power'] = np.mean(self.ep_powers)
            self.episode_metrics['cumulative_power'] = np.sum(self.ep_powers)
            self.episode_metrics['mean_comfort_penalty'] = np.mean(
                self.ep_term_comfort)
            self.episode_metrics['cumulative_comfort_penalty'] = np.sum(
                self.ep_term_comfort)
            self.episode_metrics['mean_power_penalty'] = np.mean(
                self.ep_term_energy)
            self.episode_metrics['cumulative_power_penalty'] = np.sum(
                self.ep_term_energy)
            try:
                self.episode_metrics['comfort_violation_time(%)'] = \
                    self.num_comfort_violation / self.ep_timesteps * 100
            except ZeroDivisionError:
                self.episode_metrics['comfort_violation_time(%)'] = np.nan

            # reset episode info
            self._reset_episode_accumulators()

        # During first episode, as it not finished, it shouldn't be recording
        if hasattr(self, 'episode_metrics'):
            for key, metric in self.episode_metrics.items():
                self.logger.record(
                    'episode/' + key, metric)

        return True

    def on_training_end(self):
        """Re-activate the Sinergym CSVLogger when training finishes."""
        if is_wrapped(self.training_env, LoggerWrapper):
            self.training_env.env_method('activate_logger')
class LoggerEvalCallback(EvalCallback):
    """Callback for evaluating an agent.

    Extends SB3's ``EvalCallback`` with Sinergym-specific metrics
    (power consumption, comfort violation/penalty, power penalty),
    logged under the 'eval/' tag and persisted to ``evaluations.npz``.

    :param eval_env: The environment used for initialization
    :param callback_on_new_best: Callback to trigger when there is a new best model according to the ``mean_reward``
    :param n_eval_episodes: The number of episodes to test the agent
    :param eval_freq: Evaluate the agent every eval_freq call of the callback.
    :param log_path: Path to a folder where the evaluations (``evaluations.npz``) will be saved. It will be updated at each evaluation.
    :param best_model_save_path: Path to a folder where the best model according to performance on the eval env will be saved.
    :param deterministic: Whether the evaluation should use a stochastic or deterministic actions.
    :param render: Whether to render or not the environment during evaluation
    :param verbose: Verbosity level; a value > 0 prints evaluation summaries.
    :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been wrapped with a Monitor wrapper)
    """

    def __init__(
        self,
        eval_env: Union[gym.Env, VecEnv],
        callback_on_new_best: Optional[BaseCallback] = None,
        n_eval_episodes: int = 5,
        eval_freq: int = 10000,
        log_path: Optional[str] = None,
        best_model_save_path: Optional[str] = None,
        deterministic: bool = True,
        render: bool = False,
        verbose: int = 1,
        warn: bool = True,
    ):
        """Initialize the base ``EvalCallback`` plus Sinergym metric buffers."""
        super(
            LoggerEvalCallback,
            self).__init__(
            eval_env=eval_env,
            callback_on_new_best=callback_on_new_best,
            n_eval_episodes=n_eval_episodes,
            eval_freq=eval_freq,
            log_path=log_path,
            best_model_save_path=best_model_save_path,
            deterministic=deterministic,
            render=render,
            verbose=verbose,
            warn=warn)
        # Evaluation history: one entry appended per evaluation round,
        # each entry holding the per-episode values of that round.
        self.evaluations_power_consumption = []
        self.evaluations_comfort_violation = []
        self.evaluations_comfort_penalty = []
        self.evaluations_power_penalty = []
        # Summary of the most recent round, recorded under 'eval/'.
        self.evaluation_metrics = {}

    def _on_step(self) -> bool:
        """Every ``eval_freq`` calls, evaluate the agent and log metrics.

        :return: ``True`` to continue training (or the result of the
            child callback when a new best mean reward triggers it).
        """
        if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
            # Sync training and eval env if there is VecNormalize
            sync_envs_normalization(self.training_env, self.eval_env)
            # Reset success rate buffer
            self._is_success_buffer = []
            # evaluate_policy returns a dict of per-episode lists:
            # episodes_rewards, episodes_lengths, episodes_powers,
            # episodes_comfort_violations, episodes_comfort_penalties,
            # episodes_power_penalties
            episodes_data = evaluate_policy(
                self.model,
                self.eval_env,
                n_eval_episodes=self.n_eval_episodes,
                render=self.render,
                deterministic=self.deterministic,
                callback=None,
            )
            if self.log_path is not None:
                # Extend the running history with this round's data and
                # dump everything to the .npz archive at log_path.
                self.evaluations_timesteps.append(self.num_timesteps)
                self.evaluations_results.append(
                    episodes_data['episodes_rewards'])
                self.evaluations_length.append(
                    episodes_data['episodes_lengths'])
                self.evaluations_power_consumption.append(
                    episodes_data['episodes_powers'])
                self.evaluations_comfort_violation.append(
                    episodes_data['episodes_comfort_violations'])
                self.evaluations_comfort_penalty.append(
                    episodes_data['episodes_comfort_penalties'])
                self.evaluations_power_penalty.append(
                    episodes_data['episodes_power_penalties'])
                kwargs = {}
                # Save success log if present
                if len(self._is_success_buffer) > 0:
                    self.evaluations_successes.append(self._is_success_buffer)
                    kwargs = dict(successes=self.evaluations_successes)
                # NOTE(review): the array names mix 'ep_*' and 'episodes_*'
                # prefixes; kept as-is since readers of the .npz file may
                # depend on these exact keys.
                np.savez(
                    self.log_path,
                    timesteps=self.evaluations_timesteps,
                    results=self.evaluations_results,
                    ep_lengths=self.evaluations_length,
                    ep_powers=self.evaluations_power_consumption,
                    ep_comfort_violations=self.evaluations_comfort_violation,
                    episodes_comfort_penalties=self.evaluations_comfort_penalty,
                    episodes_power_penalties=self.evaluations_power_penalty,
                    **kwargs,
                )
            # Aggregate this round's episodes into scalar metrics.
            mean_reward, std_reward = np.mean(
                episodes_data['episodes_rewards']), np.std(
                episodes_data['episodes_rewards'])
            mean_ep_length, std_ep_length = np.mean(
                episodes_data['episodes_lengths']), np.std(
                episodes_data['episodes_lengths'])
            self.evaluation_metrics['mean_rewards'] = mean_reward
            self.evaluation_metrics['std_rewards'] = std_reward
            self.evaluation_metrics['mean_ep_length'] = mean_ep_length
            self.evaluation_metrics['mean_power_consumption'] = np.mean(
                episodes_data['episodes_powers'])
            self.evaluation_metrics['comfort_violation(%)'] = np.mean(
                episodes_data['episodes_comfort_violations'])
            self.evaluation_metrics['comfort_penalty'] = np.mean(
                episodes_data['episodes_comfort_penalties'])
            self.evaluation_metrics['power_penalty'] = np.mean(
                episodes_data['episodes_power_penalties'])
            if self.verbose > 0:
                print(
                    f"Eval num_timesteps={self.num_timesteps}, "
                    f"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
                print(
                    f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
            # Add to current Logger
            for key, metric in self.evaluation_metrics.items():
                self.logger.record('eval/' + key, metric)
            if len(self._is_success_buffer) > 0:
                success_rate = np.mean(self._is_success_buffer)
                if self.verbose > 0:
                    print(f"Success rate: {100 * success_rate:.2f}%")
                self.logger.record("eval/success_rate", success_rate)
            # Track the best mean reward seen so far; optionally persist
            # the corresponding model checkpoint.
            if mean_reward > self.best_mean_reward:
                if self.verbose > 0:
                    print("New best mean reward!")
                if self.best_model_save_path is not None:
                    self.model.save(os.path.join(
                        self.best_model_save_path, "best_model"))
                self.best_mean_reward = mean_reward
                # Trigger callback if needed
                if self.callback is not None:
                    return self._on_event()
        return True
| 43.373832 | 147 | 0.606119 |
import os
from typing import Optional, Union
import gym
import numpy as np
from stable_baselines3.common.callbacks import BaseCallback, EvalCallback
from stable_baselines3.common.env_util import is_wrapped
from stable_baselines3.common.vec_env import VecEnv, sync_envs_normalization
from sinergym.utils.evaluation import evaluate_policy
from sinergym.utils.wrappers import LoggerWrapper, NormalizeObservation
class LoggerCallback(BaseCallback):
    """Custom callback that sends Sinergym interaction data to the SB3 logger.

    Every step it records the current observation, action and simulated
    action under the 'observation/', 'action/' and 'action_simulation/'
    tags, and accumulates rewards, power and comfort terms. When an
    episode finishes it records a summary under the 'episode/' tag.

    :param sinergym_logger: Whether the Sinergym CSV logger stays active
        during training (only applies when the training env is wrapped
        with ``LoggerWrapper``).
    :param verbose: Verbosity level forwarded to ``BaseCallback``.
    """

    def __init__(self, sinergym_logger=False, verbose=0):
        super(LoggerCallback, self).__init__(verbose)
        self.sinergym_logger = sinergym_logger
        # Per-episode accumulators; reset each time an episode ends.
        self.ep_rewards = []
        self.ep_powers = []
        self.ep_term_comfort = []
        self.ep_term_energy = []
        self.num_comfort_violation = 0
        self.ep_timesteps = 0

    def _on_training_start(self):
        """Configure the Sinergym logger and select the recording function."""
        # Activate/deactivate the env's own CSV logger as requested.
        if is_wrapped(self.training_env, LoggerWrapper):
            if self.sinergym_logger:
                self.training_env.env_method('activate_logger')
            else:
                self.training_env.env_method('deactivate_logger')
        # Pick the recording function by algorithm family:
        # logger.record for on-policy, logger.record_mean for off-policy.
        if 'OnPolicyAlgorithm' in self.globals.keys():
            self.record = self.logger.record
        elif 'OffPolicyAlgorithm' in self.globals.keys():
            self.record = self.logger.record_mean
        else:
            # Previously a bare `raise KeyError`; keep the exception type
            # but attach a diagnostic message.
            raise KeyError(
                'Neither OnPolicyAlgorithm nor OffPolicyAlgorithm found '
                'in callback globals.')

    def _on_step(self) -> bool:
        """Record one interaction step; emit an episode summary on done.

        :return: Always ``True`` (never interrupts training).
        """
        info = self.locals['infos'][-1]

        # --- Observation logging ---------------------------------------
        variables = self.training_env.get_attr('variables')[0]['observation']
        if self.training_env.env_is_wrapped(
                wrapper_class=NormalizeObservation)[0]:
            # When observations are normalized, log both the normalized
            # and the unwrapped (raw) values.
            obs_normalized = self.locals['new_obs'][-1]
            obs = self.training_env.env_method('get_unwrapped_obs')[-1]
            for i, variable in enumerate(variables):
                self.record(
                    'normalized_observation/' + variable, obs_normalized[i])
                self.record(
                    'observation/' + variable, obs[i])
        else:
            obs = self.locals['new_obs'][-1]
            for i, variable in enumerate(variables):
                self.record(
                    'observation/' + variable, obs[i])

        # --- Action logging --------------------------------------------
        variables = self.training_env.get_attr('variables')[0]['action']
        action = None
        action_ = info['action_']
        # The key holding the current action in `locals` depends on the
        # algorithm implementation; try the known variants in order.
        try:
            action = self.locals['clipped_actions'][-1]
        except KeyError:
            try:
                action = self.locals['action'][-1]
            except KeyError:
                try:
                    action = self.locals['actions'][-1]
                except KeyError:
                    raise KeyError(
                        'Algorithm action key in locals dict unknown.')
        # For discrete spaces, translate the action index through the
        # env's action_mapping before logging.
        if self.training_env.get_attr('flag_discrete')[0]:
            action = self.training_env.get_attr('action_mapping')[0][action]
        for i, variable in enumerate(variables):
            if action is not None:
                self.record(
                    'action/' + variable, action[i])
            self.record(
                'action_simulation/' + variable, action_[i])

        # --- Store episode data ----------------------------------------
        try:
            self.ep_rewards.append(self.locals['rewards'][-1])
        except KeyError:
            try:
                self.ep_rewards.append(self.locals['reward'][-1])
            except KeyError:
                print('Algorithm reward key in locals dict unknown')
        self.ep_powers.append(info['total_power'])
        self.ep_term_comfort.append(info['comfort_penalty'])
        self.ep_term_energy.append(info['total_power_no_units'])
        if info['comfort_penalty'] != 0:
            self.num_comfort_violation += 1
        self.ep_timesteps += 1

        # --- Episode summary -------------------------------------------
        # Default to False so a missing 'dones'/'done' key cannot leave
        # `done` unbound (previously a latent NameError in that path).
        done = False
        try:
            done = self.locals['dones'][-1]
        except KeyError:
            try:
                done = self.locals['done'][-1]
            except KeyError:
                print('Algorithm done key in locals dict unknown')
        if done:
            # Store last episode metrics.
            self.episode_metrics = {}
            self.episode_metrics['ep_length'] = self.ep_timesteps
            self.episode_metrics['cumulative_reward'] = np.sum(
                self.ep_rewards)
            self.episode_metrics['mean_reward'] = np.mean(self.ep_rewards)
            self.episode_metrics['mean_power'] = np.mean(self.ep_powers)
            self.episode_metrics['cumulative_power'] = np.sum(self.ep_powers)
            self.episode_metrics['mean_comfort_penalty'] = np.mean(
                self.ep_term_comfort)
            self.episode_metrics['cumulative_comfort_penalty'] = np.sum(
                self.ep_term_comfort)
            self.episode_metrics['mean_power_penalty'] = np.mean(
                self.ep_term_energy)
            self.episode_metrics['cumulative_power_penalty'] = np.sum(
                self.ep_term_energy)
            try:
                self.episode_metrics['comfort_violation_time(%)'] = self.num_comfort_violation / \
                    self.ep_timesteps * 100
            except ZeroDivisionError:
                self.episode_metrics['comfort_violation_time(%)'] = np.nan
            # Reset episode accumulators for the next episode.
            self.ep_rewards = []
            self.ep_powers = []
            self.ep_term_comfort = []
            self.ep_term_energy = []
            self.ep_timesteps = 0
            self.num_comfort_violation = 0

        # During the first (still unfinished) episode there is no summary
        # yet, so skip recording until episode_metrics exists.
        if hasattr(self, 'episode_metrics'):
            for key, metric in self.episode_metrics.items():
                self.logger.record(
                    'episode/' + key, metric)
        return True

    def on_training_end(self):
        """Re-activate the Sinergym CSV logger once training has finished."""
        if is_wrapped(self.training_env, LoggerWrapper):
            self.training_env.env_method('activate_logger')
class LoggerEvalCallback(EvalCallback):
    """Evaluation callback that also logs Sinergym-specific metrics.

    :param eval_env: The environment used for evaluation.
    :param callback_on_new_best: Callback triggered on a new best ``mean_reward``.
    :param n_eval_episodes: Number of episodes per evaluation round.
    :param eval_freq: Evaluate the agent every ``eval_freq`` calls of the callback.
    :param log_path: Folder where the evaluations (``evaluations.npz``) are saved.
    :param best_model_save_path: Folder where the best model (by mean reward) is saved.
    :param deterministic: Whether evaluation uses deterministic actions.
    :param render: Whether to render the environment during evaluation.
    :param verbose: Verbosity level; a value > 0 prints evaluation summaries.
    :param warn: Passed to ``evaluate_policy``.
    """

    def __init__(
        self,
        eval_env: Union[gym.Env, VecEnv],
        callback_on_new_best: Optional[BaseCallback] = None,
        n_eval_episodes: int = 5,
        eval_freq: int = 10000,
        log_path: Optional[str] = None,
        best_model_save_path: Optional[str] = None,
        deterministic: bool = True,
        render: bool = False,
        verbose: int = 1,
        warn: bool = True,
    ):
        """Initialize the base ``EvalCallback`` plus Sinergym metric buffers."""
        super(
            LoggerEvalCallback,
            self).__init__(
            eval_env=eval_env,
            callback_on_new_best=callback_on_new_best,
            n_eval_episodes=n_eval_episodes,
            eval_freq=eval_freq,
            log_path=log_path,
            best_model_save_path=best_model_save_path,
            deterministic=deterministic,
            render=render,
            verbose=verbose,
            warn=warn)
        # Evaluation history: one entry per evaluation round, each entry
        # holding the per-episode values of that round.
        self.evaluations_power_consumption = []
        self.evaluations_comfort_violation = []
        self.evaluations_comfort_penalty = []
        self.evaluations_power_penalty = []
        # Summary of the most recent round, recorded under 'eval/'.
        self.evaluation_metrics = {}

    def _on_step(self) -> bool:
        """Every ``eval_freq`` calls, evaluate the agent and log metrics.

        :return: ``True`` to continue training (or the child callback's
            result when a new best mean reward triggers it).
        """
        if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
            # Sync training and eval env if there is VecNormalize
            sync_envs_normalization(self.training_env, self.eval_env)
            # Reset success rate buffer
            self._is_success_buffer = []
            # evaluate_policy returns a dict of per-episode lists:
            # episodes_rewards, episodes_lengths, episodes_powers,
            # episodes_comfort_violations, episodes_comfort_penalties,
            # episodes_power_penalties
            episodes_data = evaluate_policy(
                self.model,
                self.eval_env,
                n_eval_episodes=self.n_eval_episodes,
                render=self.render,
                deterministic=self.deterministic,
                callback=None,
            )
            if self.log_path is not None:
                # Extend the running history with this round's data and
                # dump everything to the .npz archive at log_path.
                self.evaluations_timesteps.append(self.num_timesteps)
                self.evaluations_results.append(
                    episodes_data['episodes_rewards'])
                self.evaluations_length.append(
                    episodes_data['episodes_lengths'])
                self.evaluations_power_consumption.append(
                    episodes_data['episodes_powers'])
                self.evaluations_comfort_violation.append(
                    episodes_data['episodes_comfort_violations'])
                self.evaluations_comfort_penalty.append(
                    episodes_data['episodes_comfort_penalties'])
                self.evaluations_power_penalty.append(
                    episodes_data['episodes_power_penalties'])
                kwargs = {}
                # Save success log if present
                if len(self._is_success_buffer) > 0:
                    self.evaluations_successes.append(self._is_success_buffer)
                    kwargs = dict(successes=self.evaluations_successes)
                # NOTE(review): the array names mix 'ep_*' and 'episodes_*'
                # prefixes; kept as-is since readers of the .npz file may
                # depend on these exact keys.
                np.savez(
                    self.log_path,
                    timesteps=self.evaluations_timesteps,
                    results=self.evaluations_results,
                    ep_lengths=self.evaluations_length,
                    ep_powers=self.evaluations_power_consumption,
                    ep_comfort_violations=self.evaluations_comfort_violation,
                    episodes_comfort_penalties=self.evaluations_comfort_penalty,
                    episodes_power_penalties=self.evaluations_power_penalty,
                    **kwargs,
                )
            # Aggregate this round's episodes into scalar metrics.
            mean_reward, std_reward = np.mean(
                episodes_data['episodes_rewards']), np.std(
                episodes_data['episodes_rewards'])
            mean_ep_length, std_ep_length = np.mean(
                episodes_data['episodes_lengths']), np.std(
                episodes_data['episodes_lengths'])
            self.evaluation_metrics['mean_rewards'] = mean_reward
            self.evaluation_metrics['std_rewards'] = std_reward
            self.evaluation_metrics['mean_ep_length'] = mean_ep_length
            self.evaluation_metrics['mean_power_consumption'] = np.mean(
                episodes_data['episodes_powers'])
            self.evaluation_metrics['comfort_violation(%)'] = np.mean(
                episodes_data['episodes_comfort_violations'])
            self.evaluation_metrics['comfort_penalty'] = np.mean(
                episodes_data['episodes_comfort_penalties'])
            self.evaluation_metrics['power_penalty'] = np.mean(
                episodes_data['episodes_power_penalties'])
            if self.verbose > 0:
                print(
                    f"Eval num_timesteps={self.num_timesteps}, "
                    f"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}")
                print(
                    f"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}")
            # Add to current Logger
            for key, metric in self.evaluation_metrics.items():
                self.logger.record('eval/' + key, metric)
            if len(self._is_success_buffer) > 0:
                success_rate = np.mean(self._is_success_buffer)
                if self.verbose > 0:
                    print(f"Success rate: {100 * success_rate:.2f}%")
                self.logger.record("eval/success_rate", success_rate)
            # Track the best mean reward seen so far; optionally persist
            # the corresponding model checkpoint.
            if mean_reward > self.best_mean_reward:
                if self.verbose > 0:
                    print("New best mean reward!")
                if self.best_model_save_path is not None:
                    self.model.save(os.path.join(
                        self.best_model_save_path, "best_model"))
                self.best_mean_reward = mean_reward
                # Trigger callback if needed
                if self.callback is not None:
                    return self._on_event()
        return True
| true | true |
1c39b9aca31c7993f8e67a738f983c9eb28d0bf7 | 1,205 | py | Python | models/block.py | uncleguanghui/bitcoin_toolkit | c5898d841201ccd3271adee43f7d116e6333e0d8 | [
"MIT"
] | null | null | null | models/block.py | uncleguanghui/bitcoin_toolkit | c5898d841201ccd3271adee43f7d116e6333e0d8 | [
"MIT"
] | 1 | 2020-10-12T01:52:50.000Z | 2021-06-22T10:29:10.000Z | models/block.py | uncleguanghui/bitcoin_toolkit | c5898d841201ccd3271adee43f7d116e6333e0d8 | [
"MIT"
] | 1 | 2021-03-26T15:18:26.000Z | 2021-03-26T15:18:26.000Z | from datetime import datetime
class Block:
    """In-memory record of a bitcoin block header plus its transaction ids."""

    def __init__(self,
                 block_hash: str,
                 *,
                 previous_hash: str = None,
                 height: int = None,
                 timestamp: datetime = None,
                 n_tx: int = None):
        """Create a block; every keyword field is optional and may be None.

        :param block_hash: hash of this block (required, str)
        :param previous_hash: hash of the preceding block
        :param height: block height; may be absent (e.g. unsorted blocks)
        :param timestamp: mining time
        :param n_tx: number of transactions in the block
        """
        # Required field is checked first; optional fields in declaration
        # order, each allowed to be None or of the expected type.
        assert isinstance(block_hash, str), block_hash
        for value, expected in ((previous_hash, str), (height, int),
                                (timestamp, datetime), (n_tx, int)):
            assert value is None or isinstance(value, expected), value
        self.hash = block_hash              # block hash
        self.previous_hash = previous_hash  # previous block hash
        self.height = height                # None when height is unknown
        self.datetime = timestamp           # mining timestamp
        self.n_tx = n_tx                    # transaction count
        self.txid_list = []                 # transaction ids, appended later

    def __repr__(self):
        suffix = '' if self.height is None else f'({self.height})'
        return f'Block({self.hash}){suffix}'

    def add_txid(self, txid: str):
        """Append a transaction id (must be a str) to this block."""
        assert isinstance(txid, str), txid
        self.txid_list.append(txid)
| 34.428571 | 94 | 0.575934 | from datetime import datetime
class Block:
    """Lightweight block model: header fields plus a list of txids."""

    @staticmethod
    def _typecheck(value, expected):
        """Return True when value is None or an instance of expected."""
        return value is None or isinstance(value, expected)

    def __init__(self,
                 block_hash: str,
                 *,
                 previous_hash: str = None,
                 height: int = None,
                 timestamp: datetime = None,
                 n_tx: int = None):
        """Build a block from its hash plus optional header fields."""
        assert isinstance(block_hash, str), block_hash
        assert self._typecheck(previous_hash, str), previous_hash
        assert self._typecheck(height, int), height
        assert self._typecheck(timestamp, datetime), timestamp
        assert self._typecheck(n_tx, int), n_tx
        self.hash = block_hash              # block hash
        self.previous_hash = previous_hash  # previous block hash
        self.height = height                # None when height is unknown
        self.datetime = timestamp           # mining timestamp
        self.n_tx = n_tx                    # transaction count
        self.txid_list = []                 # transaction ids, appended later

    def __repr__(self):
        text = f'Block({self.hash})'
        if self.height is not None:
            text += f'({self.height})'
        return text

    def add_txid(self, txid: str):
        """Append a transaction id (must be a str) to this block."""
        assert isinstance(txid, str), txid
        self.txid_list.append(txid)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.