hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acde81328d86e4535412fb6e1e07204cf79b6eab | 52 | py | Python | __main__.py | josebadoe/trkm | fb25e2f4c67e88ca9dfbfd0816d402e9ddbc7836 | [
"Apache-2.0"
] | null | null | null | __main__.py | josebadoe/trkm | fb25e2f4c67e88ca9dfbfd0816d402e9ddbc7836 | [
"Apache-2.0"
] | null | null | null | __main__.py | josebadoe/trkm | fb25e2f4c67e88ca9dfbfd0816d402e9ddbc7836 | [
"Apache-2.0"
] | null | null | null | from trkm.trkm import TrackMerge
TrackMerge.main()
| 13 | 32 | 0.807692 |
acde815a66dc952fe50c800c82f39d5b8b1a8a66 | 2,300 | py | Python | chainer/training/extensions/exponential_shift.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | 7 | 2017-05-08T07:02:40.000Z | 2018-12-02T18:35:39.000Z | chainer/training/extensions/exponential_shift.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | null | null | null | chainer/training/extensions/exponential_shift.py | mingxiaoh/chainer-v3 | 815ff00f5eaf7944d6e8a75662ff64a2fe046a4d | [
"BSD-3-Clause"
] | 1 | 2021-05-27T16:52:11.000Z | 2021-05-27T16:52:11.000Z | from __future__ import division
from chainer.training import extension
class ExponentialShift(extension.Extension):

    """Trainer extension to exponentially shift an optimizer attribute.

    This extension exponentially increases or decreases the specified attribute
    of the optimizer. The typical use case is an exponential decay of the
    learning rate.

    This extension is also called before the training loop starts by default.

    Args:
        attr (str): Name of the attribute to shift.
        rate (float): Rate of the exponential shift. This value is multiplied
            to the attribute at each call.
        init (float): Initial value of the attribute. If it is ``None``, the
            extension extracts the attribute at the first call and uses it as
            the initial value.
        target (float): Target value of the attribute. If the attribute reaches
            this value, the shift stops.
        optimizer (~chainer.Optimizer): Target optimizer to adjust the
            attribute. If it is ``None``, the main optimizer of the updater is
            used.

    """

    # Run once before the training loop so the initial value is applied
    # before the first iteration.
    invoke_before_training = True

    def __init__(self, attr, rate, init=None, target=None, optimizer=None):
        self._attr = attr
        if rate < 0:
            raise ValueError('ExponentialShift does not support negative rate')
        self._rate = rate
        self._init = init
        self._target = target
        self._optimizer = optimizer
        # Invocation counter; used as the exponent of ``rate``.
        self._t = 0

    def __call__(self, trainer):
        optimizer = self._optimizer or trainer.updater.get_optimizer('main')
        if self._init is None:
            # Lazily capture the attribute's current value as the start point.
            self._init = getattr(optimizer, self._attr)
        value = self._init * (self._rate ** self._t)
        if self._target is not None:
            if self._rate > 1:
                # almost same as value = min(value, self._target), but this
                # line supports negative values, too
                if value / self._target > 1:
                    value = self._target
            else:
                # ditto
                if value / self._target < 1:
                    value = self._target
        setattr(optimizer, self._attr, value)
        self._t += 1

    def serialize(self, serializer):
        # Persist the step counter so a resumed run continues the schedule.
        self._t = serializer('_t', self._t)
| 37.096774 | 79 | 0.62087 |
acde817fbda7e9569031f7e2e8789325b11ed427 | 166,424 | py | Python | python/pyspark/sql/functions.py | kirito1015/xiaoluobo_spark | 7a570ddf108d750264c6997c02d3ae59b1f59e36 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | python/pyspark/sql/functions.py | kirito1015/xiaoluobo_spark | 7a570ddf108d750264c6997c02d3ae59b1f59e36 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | python/pyspark/sql/functions.py | kirito1015/xiaoluobo_spark | 7a570ddf108d750264c6997c02d3ae59b1f59e36 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import sys
import functools
import warnings
from pyspark import since, SparkContext
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf # noqa: F401
from pyspark.sql.udf import _create_udf
# Keep pandas_udf and PandasUDFType import for backwards compatible import; moved in SPARK-28264
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType # noqa: F401
from pyspark.sql.utils import to_str
# Note to developers: all of PySpark functions here take string as column names whenever possible.
# Namely, if columns are referred as arguments, they can be always both Column or string,
# even though there might be few exceptions for legacy or inevitable reasons.
# If you are fixing other language APIs together, also please note that Scala side is not the case
# since it requires to make every single overridden definition.
def _get_get_jvm_function(name, sc):
"""
Retrieves JVM function identified by name from
Java gateway associated with sc.
"""
return getattr(sc._jvm.functions, name)
def _invoke_function(name, *args):
    """
    Invokes JVM function identified by name with args
    and wraps the result with :class:`~pyspark.sql.Column`.
    """
    # Resolve the function on the gateway of the currently active context;
    # ``args`` must already be JVM-compatible values (e.g. Java columns).
    jf = _get_get_jvm_function(name, SparkContext._active_spark_context)
    return Column(jf(*args))
def _invoke_function_over_column(name, col):
    """
    Invokes unary JVM function identified by name
    and wraps the result with :class:`~pyspark.sql.Column`.
    """
    # ``col`` may be a Column or a column name; _to_java_column handles both.
    return _invoke_function(name, _to_java_column(col))
def _invoke_binary_math_function(name, col1, col2):
    """
    Invokes binary JVM math function identified by name
    and wraps the result with :class:`~pyspark.sql.Column`.
    """
    return _invoke_function(
        name,
        # For legacy reasons, the arguments here can be implicitly converted into floats,
        # if they are not columns or strings.
        _to_java_column(col1) if isinstance(col1, (str, Column)) else float(col1),
        _to_java_column(col2) if isinstance(col2, (str, Column)) else float(col2)
    )
def _options_to_str(options=None):
if options:
return {key: to_str(value) for (key, value) in options.items()}
return {}
def lit(col):
    """
    Creates a :class:`~pyspark.sql.Column` of literal value.

    .. versionadded:: 1.3.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or Python primitive
        the value to turn into a literal column. An existing Column is
        returned unchanged.

    Examples
    --------
    >>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
    [Row(height=5, spark_user=True)]
    """
    # Columns pass through untouched; other values become JVM literals.
    return col if isinstance(col, Column) else _invoke_function("lit", col)
@since(1.3)
def col(col):
    """
    Returns a :class:`~pyspark.sql.Column` based on the given column name.

    Examples
    --------
    >>> col('x')
    Column<'x'>
    >>> column('x')
    Column<'x'>
    """
    return _invoke_function("col", col)
column = col
@since(1.3)
def asc(col):
    """
    Returns a sort expression based on the ascending order of the given column name.

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        the column to sort by, as a Column or a column name.
    """
    return (
        # A Column can produce its own ascending ordering directly.
        col.asc() if isinstance(col, Column)
        else _invoke_function("asc", col)
    )
@since(1.3)
def desc(col):
    """
    Returns a sort expression based on the descending order of the given column name.

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        the column to sort by, as a Column or a column name.
    """
    return (
        # A Column can produce its own descending ordering directly.
        col.desc() if isinstance(col, Column)
        else _invoke_function("desc", col)
    )
@since(1.3)
def sqrt(col):
"""
Computes the square root of the specified float value.
"""
return _invoke_function_over_column("sqrt", col)
@since(1.3)
def abs(col):
"""
Computes the absolute value.
"""
return _invoke_function_over_column("abs", col)
@since(1.3)
def max(col):
"""
Aggregate function: returns the maximum value of the expression in a group.
"""
return _invoke_function_over_column("max", col)
@since(1.3)
def min(col):
"""
Aggregate function: returns the minimum value of the expression in a group.
"""
return _invoke_function_over_column("min", col)
@since(1.3)
def count(col):
"""
Aggregate function: returns the number of items in a group.
"""
return _invoke_function_over_column("count", col)
@since(1.3)
def sum(col):
"""
Aggregate function: returns the sum of all values in the expression.
"""
return _invoke_function_over_column("sum", col)
@since(1.3)
def avg(col):
"""
Aggregate function: returns the average of the values in a group.
"""
return _invoke_function_over_column("avg", col)
@since(1.3)
def mean(col):
"""
Aggregate function: returns the average of the values in a group.
"""
return _invoke_function_over_column("mean", col)
@since(1.3)
def sumDistinct(col):
"""
Aggregate function: returns the sum of distinct values in the expression.
.. deprecated:: 3.2.0
Use :func:`sum_distinct` instead.
"""
warnings.warn("Deprecated in 3.2, use sum_distinct instead.", FutureWarning)
return sum_distinct(col)
@since(3.2)
def sum_distinct(col):
"""
Aggregate function: returns the sum of distinct values in the expression.
"""
return _invoke_function_over_column("sum_distinct", col)
def product(col):
"""
Aggregate function: returns the product of the values in a group.
.. versionadded:: 3.2.0
Parameters
----------
col : str, :class:`Column`
column containing values to be multiplied together
Examples
--------
>>> df = spark.range(1, 10).toDF('x').withColumn('mod3', col('x') % 3)
>>> prods = df.groupBy('mod3').agg(product('x').alias('product'))
>>> prods.orderBy('mod3').show()
+----+-------+
|mod3|product|
+----+-------+
| 0| 162.0|
| 1| 28.0|
| 2| 80.0|
+----+-------+
"""
return _invoke_function_over_column("product", col)
def acos(col):
"""
.. versionadded:: 1.4.0
Returns
-------
:class:`~pyspark.sql.Column`
inverse cosine of `col`, as if computed by `java.lang.Math.acos()`
"""
return _invoke_function_over_column("acos", col)
def acosh(col):
"""
Computes inverse hyperbolic cosine of the input column.
.. versionadded:: 3.1.0
Returns
-------
:class:`~pyspark.sql.Column`
"""
return _invoke_function_over_column("acosh", col)
def asin(col):
"""
.. versionadded:: 1.3.0
Returns
-------
:class:`~pyspark.sql.Column`
inverse sine of `col`, as if computed by `java.lang.Math.asin()`
"""
return _invoke_function_over_column("asin", col)
def asinh(col):
"""
Computes inverse hyperbolic sine of the input column.
.. versionadded:: 3.1.0
Returns
-------
:class:`~pyspark.sql.Column`
"""
return _invoke_function_over_column("asinh", col)
def atan(col):
"""
.. versionadded:: 1.4.0
Returns
-------
:class:`~pyspark.sql.Column`
inverse tangent of `col`, as if computed by `java.lang.Math.atan()`
"""
return _invoke_function_over_column("atan", col)
def atanh(col):
"""
Computes inverse hyperbolic tangent of the input column.
.. versionadded:: 3.1.0
Returns
-------
:class:`~pyspark.sql.Column`
"""
return _invoke_function_over_column("atanh", col)
@since(1.4)
def cbrt(col):
"""
Computes the cube-root of the given value.
"""
return _invoke_function_over_column("cbrt", col)
@since(1.4)
def ceil(col):
"""
Computes the ceiling of the given value.
"""
return _invoke_function_over_column("ceil", col)
def cos(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
angle in radians
Returns
-------
:class:`~pyspark.sql.Column`
cosine of the angle, as if computed by `java.lang.Math.cos()`.
"""
return _invoke_function_over_column("cos", col)
def cosh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
hyperbolic angle
Returns
-------
:class:`~pyspark.sql.Column`
hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`
"""
return _invoke_function_over_column("cosh", col)
@since(1.4)
def exp(col):
"""
Computes the exponential of the given value.
"""
return _invoke_function_over_column("exp", col)
@since(1.4)
def expm1(col):
"""
Computes the exponential of the given value minus one.
"""
return _invoke_function_over_column("expm1", col)
@since(1.4)
def floor(col):
"""
Computes the floor of the given value.
"""
return _invoke_function_over_column("floor", col)
@since(1.4)
def log(col):
"""
Computes the natural logarithm of the given value.
"""
return _invoke_function_over_column("log", col)
@since(1.4)
def log10(col):
"""
Computes the logarithm of the given value in Base 10.
"""
return _invoke_function_over_column("log10", col)
@since(1.4)
def log1p(col):
"""
Computes the natural logarithm of the given value plus one.
"""
return _invoke_function_over_column("log1p", col)
@since(1.4)
def rint(col):
"""
Returns the double value that is closest in value to the argument and
is equal to a mathematical integer.
"""
return _invoke_function_over_column("rint", col)
@since(1.4)
def signum(col):
"""
Computes the signum of the given value.
"""
return _invoke_function_over_column("signum", col)
def sin(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
Returns
-------
:class:`~pyspark.sql.Column`
sine of the angle, as if computed by `java.lang.Math.sin()`
"""
return _invoke_function_over_column("sin", col)
def sinh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
hyperbolic angle
Returns
-------
:class:`~pyspark.sql.Column`
hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`
"""
return _invoke_function_over_column("sinh", col)
def tan(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
angle in radians
Returns
-------
:class:`~pyspark.sql.Column`
tangent of the given value, as if computed by `java.lang.Math.tan()`
"""
return _invoke_function_over_column("tan", col)
def tanh(col):
"""
.. versionadded:: 1.4.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
hyperbolic angle
Returns
-------
:class:`~pyspark.sql.Column`
hyperbolic tangent of the given value
as if computed by `java.lang.Math.tanh()`
"""
return _invoke_function_over_column("tanh", col)
@since(1.4)
def toDegrees(col):
"""
.. deprecated:: 2.1.0
Use :func:`degrees` instead.
"""
warnings.warn("Deprecated in 2.1, use degrees instead.", FutureWarning)
return degrees(col)
@since(1.4)
def toRadians(col):
"""
.. deprecated:: 2.1.0
Use :func:`radians` instead.
"""
warnings.warn("Deprecated in 2.1, use radians instead.", FutureWarning)
return radians(col)
@since(1.4)
def bitwiseNOT(col):
"""
Computes bitwise not.
.. deprecated:: 3.2.0
Use :func:`bitwise_not` instead.
"""
warnings.warn("Deprecated in 3.2, use bitwise_not instead.", FutureWarning)
return bitwise_not(col)
@since(3.2)
def bitwise_not(col):
"""
Computes bitwise not.
"""
return _invoke_function_over_column("bitwise_not", col)
@since(2.4)
def asc_nulls_first(col):
"""
Returns a sort expression based on the ascending order of the given
column name, and null values return before non-null values.
"""
return (
col.asc_nulls_first() if isinstance(col, Column)
else _invoke_function("asc_nulls_first", col)
)
@since(2.4)
def asc_nulls_last(col):
"""
Returns a sort expression based on the ascending order of the given
column name, and null values appear after non-null values.
"""
return (
col.asc_nulls_last() if isinstance(col, Column)
else _invoke_function("asc_nulls_last", col)
)
@since(2.4)
def desc_nulls_first(col):
"""
Returns a sort expression based on the descending order of the given
column name, and null values appear before non-null values.
"""
return (
col.desc_nulls_first() if isinstance(col, Column)
else _invoke_function("desc_nulls_first", col)
)
@since(2.4)
def desc_nulls_last(col):
"""
Returns a sort expression based on the descending order of the given
column name, and null values appear after non-null values.
"""
return (
col.desc_nulls_last() if isinstance(col, Column)
else _invoke_function("desc_nulls_last", col)
)
@since(1.6)
def stddev(col):
"""
Aggregate function: alias for stddev_samp.
"""
return _invoke_function_over_column("stddev", col)
@since(1.6)
def stddev_samp(col):
"""
Aggregate function: returns the unbiased sample standard deviation of
the expression in a group.
"""
return _invoke_function_over_column("stddev_samp", col)
@since(1.6)
def stddev_pop(col):
"""
Aggregate function: returns population standard deviation of
the expression in a group.
"""
return _invoke_function_over_column("stddev_pop", col)
@since(1.6)
def variance(col):
"""
Aggregate function: alias for var_samp
"""
return _invoke_function_over_column("variance", col)
@since(1.6)
def var_samp(col):
"""
Aggregate function: returns the unbiased sample variance of
the values in a group.
"""
return _invoke_function_over_column("var_samp", col)
@since(1.6)
def var_pop(col):
"""
Aggregate function: returns the population variance of the values in a group.
"""
return _invoke_function_over_column("var_pop", col)
@since(1.6)
def skewness(col):
"""
Aggregate function: returns the skewness of the values in a group.
"""
return _invoke_function_over_column("skewness", col)
@since(1.6)
def kurtosis(col):
"""
Aggregate function: returns the kurtosis of the values in a group.
"""
return _invoke_function_over_column("kurtosis", col)
def collect_list(col):
    """
    Aggregate function: returns a list of objects with duplicates.

    .. versionadded:: 1.6.0

    Notes
    -----
    The function is non-deterministic because the order of collected results depends
    on the order of the rows which may be non-deterministic after a shuffle.

    Examples
    --------
    >>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
    >>> df2.agg(collect_list('age')).collect()
    [Row(collect_list(age)=[2, 5, 5])]
    """
    return _invoke_function_over_column("collect_list", col)
def collect_set(col):
"""
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. versionadded:: 1.6.0
Notes
-----
The function is non-deterministic because the order of collected results depends
on the order of the rows which may be non-deterministic after a shuffle.
Examples
--------
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
return _invoke_function_over_column("collect_set", col)
def degrees(col):
"""
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
angle in radians
Returns
-------
:class:`~pyspark.sql.Column`
angle in degrees, as if computed by `java.lang.Math.toDegrees()`
"""
return _invoke_function_over_column("degrees", col)
def radians(col):
"""
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
.. versionadded:: 2.1.0
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
angle in degrees
Returns
-------
:class:`~pyspark.sql.Column`
angle in radians, as if computed by `java.lang.Math.toRadians()`
"""
return _invoke_function_over_column("radians", col)
def atan2(col1, col2):
"""
.. versionadded:: 1.4.0
Parameters
----------
col1 : str, :class:`~pyspark.sql.Column` or float
coordinate on y-axis
col2 : str, :class:`~pyspark.sql.Column` or float
coordinate on x-axis
Returns
-------
:class:`~pyspark.sql.Column`
the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
"""
return _invoke_binary_math_function("atan2", col1, col2)
@since(1.4)
def hypot(col1, col2):
"""
Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.
"""
return _invoke_binary_math_function("hypot", col1, col2)
@since(1.4)
def pow(col1, col2):
"""
Returns the value of the first argument raised to the power of the second argument.
"""
return _invoke_binary_math_function("pow", col1, col2)
@since(1.6)
def row_number():
"""
Window function: returns a sequential number starting at 1 within a window partition.
"""
return _invoke_function("row_number")
@since(1.6)
def dense_rank():
"""
Window function: returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give me sequential numbers, making
the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.
"""
return _invoke_function("dense_rank")
@since(1.6)
def rank():
"""
Window function: returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give me sequential numbers, making
the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.
"""
return _invoke_function("rank")
@since(1.6)
def cume_dist():
"""
Window function: returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.
"""
return _invoke_function("cume_dist")
@since(1.6)
def percent_rank():
"""
Window function: returns the relative rank (i.e. percentile) of rows within a window partition.
"""
return _invoke_function("percent_rank")
@since(1.3)
def approxCountDistinct(col, rsd=None):
    """
    .. deprecated:: 2.1.0
        Use :func:`approx_count_distinct` instead.
    """
    # Thin deprecation shim: warn, then delegate to the new name.
    warnings.warn("Deprecated in 2.1, use approx_count_distinct instead.", FutureWarning)
    return approx_count_distinct(col, rsd)
def approx_count_distinct(col, rsd=None):
    """Aggregate function: returns a new :class:`~pyspark.sql.Column` for approximate distinct count
    of column `col`.

    .. versionadded:: 2.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
    rsd : float, optional
        maximum relative standard deviation allowed (default = 0.05).
        For rsd < 0.01, it is more efficient to use :func:`count_distinct`

    Examples
    --------
    >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
    [Row(distinct_ages=2)]
    """
    sc = SparkContext._active_spark_context
    jcol = _to_java_column(col)
    # The JVM side overloads this function on arity, so only pass ``rsd``
    # when the caller supplied one.
    if rsd is None:
        return Column(sc._jvm.functions.approx_count_distinct(jcol))
    return Column(sc._jvm.functions.approx_count_distinct(jcol, rsd))
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
def coalesce(*cols):
    """Returns the first column that is not null.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
    >>> cDf.show()
    +----+----+
    |   a|   b|
    +----+----+
    |null|null|
    |   1|null|
    |null|   2|
    +----+----+

    >>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
    +--------------+
    |coalesce(a, b)|
    +--------------+
    |          null|
    |             1|
    |             2|
    +--------------+

    >>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
    +----+----+----------------+
    |   a|   b|coalesce(a, 0.0)|
    +----+----+----------------+
    |null|null|             0.0|
    |   1|null|             1.0|
    |null|   2|             0.0|
    +----+----+----------------+
    """
    sc = SparkContext._active_spark_context
    # _to_seq converts the Python column arguments into a JVM Seq[Column].
    jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
    return Column(jc)
def corr(col1, col2):
"""Returns a new :class:`~pyspark.sql.Column` for the Pearson Correlation Coefficient for
``col1`` and ``col2``.
.. versionadded:: 1.6.0
Examples
--------
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
def covar_pop(col1, col2):
"""Returns a new :class:`~pyspark.sql.Column` for the population covariance of ``col1`` and
``col2``.
.. versionadded:: 2.0.0
Examples
--------
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
def covar_samp(col1, col2):
"""Returns a new :class:`~pyspark.sql.Column` for the sample covariance of ``col1`` and
``col2``.
.. versionadded:: 2.0.0
Examples
--------
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
def countDistinct(col, *cols):
    """Returns a new :class:`~pyspark.sql.Column` for distinct count of ``col`` or ``cols``.

    An alias of :func:`count_distinct`, and it is encouraged to use :func:`count_distinct`
    directly.

    .. versionadded:: 1.3.0
    """
    # Pure alias kept for backwards compatibility; no deprecation warning.
    return count_distinct(col, *cols)
def count_distinct(col, *cols):
    """Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.

    .. versionadded:: 3.2.0

    Examples
    --------
    >>> df.agg(count_distinct(df.age, df.name).alias('c')).collect()
    [Row(c=2)]

    >>> df.agg(count_distinct("age", "name").alias('c')).collect()
    [Row(c=2)]
    """
    sc = SparkContext._active_spark_context
    # The JVM signature takes the first column separately from the rest,
    # hence the split between ``col`` and the ``cols`` sequence.
    jc = sc._jvm.functions.count_distinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
    return Column(jc)
def first(col, ignorenulls=False):
    """Aggregate function: returns the first value in a group.

    The function by default returns the first values it sees. It will return the first non-null
    value it sees when ignoreNulls is set to true. If all values are null, then null is returned.

    .. versionadded:: 1.3.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        the column to take the first value of.
    ignorenulls : bool
        if true, skip nulls and return the first non-null value.

    Notes
    -----
    The function is non-deterministic because its results depends on the order of the
    rows which may be non-deterministic after a shuffle.
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
    return Column(jc)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
.. versionadded:: 2.0.0
Examples
--------
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. versionadded:: 2.0.0
Notes
-----
The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
Examples
--------
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
def isnan(col):
"""An expression that returns true iff the column is NaN.
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
def isnull(col):
"""An expression that returns true iff the column is null.
.. versionadded:: 1.6.0
Examples
--------
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
def last(col, ignorenulls=False):
    """Aggregate function: returns the last value in a group.

    The function by default returns the last values it sees. It will return the last non-null
    value it sees when ignoreNulls is set to true. If all values are null, then null is returned.

    .. versionadded:: 1.3.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        the column to take the last value of.
    ignorenulls : bool
        if true, skip nulls and return the last non-null value.

    Notes
    -----
    The function is non-deterministic because its results depends on the order of the
    rows which may be non-deterministic after a shuffle.
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
    return Column(jc)
def monotonically_increasing_id():
    """A column that generates monotonically increasing 64-bit integers.

    The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
    The current implementation puts the partition ID in the upper 31 bits, and the record number
    within each partition in the lower 33 bits. The assumption is that the data frame has
    less than 1 billion partitions, and each partition has less than 8 billion records.

    .. versionadded:: 1.6.0

    Notes
    -----
    The function is non-deterministic because its result depends on partition IDs.

    As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
    This expression would return the following IDs:
    0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.

    >>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
    >>> df0.select(monotonically_increasing_id().alias('id')).collect()
    [Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
    """
    sc = SparkContext._active_spark_context
    # Delegates entirely to the JVM-side implementation; no arguments needed.
    return Column(sc._jvm.functions.monotonically_increasing_id())
def nanvl(col1, col2):
    """Returns `col1` unless it is NaN, in which case `col2` is returned.

    Both inputs should be floating point columns (:class:`DoubleType` or
    :class:`FloatType`).

    .. versionadded:: 1.6.0

    Examples
    --------
    >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
    >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
    [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
def percentile_approx(col, percentage, accuracy=10000):
    """Returns the approximate `percentile` of the numeric column `col`: the
    smallest value in the ordered `col` values (least to greatest) such that
    no more than `percentage` of the values are less than or equal to it.

    `percentage` must lie between 0.0 and 1.0. `accuracy` (default: 10000)
    is a positive numeric literal trading memory for precision: higher
    accuracy gives better results, with 1.0/accuracy the relative error of
    the approximation.

    When `percentage` is an array (each element between 0.0 and 1.0), the
    result is the approximate percentile array of `col` at those
    percentages.

    .. versionadded:: 3.1.0

    Examples
    --------
    >>> key = (col("id") % 3).alias("key")
    >>> value = (randn(42) + key * 10).alias("value")
    >>> df = spark.range(0, 1000, 1, 1).select(key, value)
    >>> df.select(
    ...     percentile_approx("value", [0.25, 0.5, 0.75], 1000000).alias("quantiles")
    ... ).printSchema()
    root
     |-- quantiles: array (nullable = true)
     |    |-- element: double (containsNull = false)

    >>> df.groupBy("key").agg(
    ...     percentile_approx("value", 0.5, lit(1000000)).alias("median")
    ... ).printSchema()
    root
     |-- key: long (nullable = true)
     |-- median: double (nullable = true)
    """
    ctx = SparkContext._active_spark_context
    if isinstance(percentage, Column):
        # Already a Column expression.
        pct = _to_java_column(percentage)
    elif isinstance(percentage, (list, tuple)):
        # A local Python sequence -> JVM array of literals.
        literals = [_create_column_from_literal(p) for p in percentage]
        pct = ctx._jvm.functions.array(_to_seq(ctx, literals))
    else:
        # Assume a scalar literal.
        pct = _create_column_from_literal(percentage)
    if isinstance(accuracy, Column):
        acc = _to_java_column(accuracy)
    else:
        acc = _create_column_from_literal(accuracy)
    return Column(ctx._jvm.functions.percentile_approx(_to_java_column(col), pct, acc))
def rand(seed=None):
    """Generates a random column of i.i.d. samples uniformly distributed
    in [0.0, 1.0).

    .. versionadded:: 1.4.0

    Notes
    -----
    The function is non-deterministic in the general case.

    Examples
    --------
    >>> df.withColumn('rand', rand(seed=42) * 3).collect()
    [Row(age=2, name='Alice', rand=2.4052597283576684),
     Row(age=5, name='Bob', rand=2.3913904055683974)]
    """
    fns = SparkContext._active_spark_context._jvm.functions
    jc = fns.rand() if seed is None else fns.rand(seed)
    return Column(jc)
def randn(seed=None):
    """Generates a column of i.i.d. samples drawn from the standard normal
    distribution.

    .. versionadded:: 1.4.0

    Notes
    -----
    The function is non-deterministic in the general case.

    Examples
    --------
    >>> df.withColumn('randn', randn(seed=42)).collect()
    [Row(age=2, name='Alice', randn=1.1027054481455365),
     Row(age=5, name='Bob', randn=0.7400395449950132)]
    """
    fns = SparkContext._active_spark_context._jvm.functions
    jc = fns.randn() if seed is None else fns.randn(seed)
    return Column(jc)
def round(col, scale=0):
    """Round the given value to `scale` decimal places using HALF_UP
    rounding when `scale` >= 0, or at the integral part when `scale` < 0.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
    [Row(r=3.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.round(_to_java_column(col), scale)
    return Column(jc)
def bround(col, scale=0):
    """Round the given value to `scale` decimal places using HALF_EVEN
    (banker's) rounding when `scale` >= 0, or at the integral part when
    `scale` < 0.

    .. versionadded:: 2.0.0

    Examples
    --------
    >>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
    [Row(r=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.bround(_to_java_column(col), scale)
    return Column(jc)
def shiftLeft(col, numBits):
    """Deprecated alias of :func:`shiftleft`; shifts the given value
    `numBits` left.

    .. versionadded:: 1.5.0

    .. deprecated:: 3.2.0
        Use :func:`shiftleft` instead.
    """
    warnings.warn("Deprecated in 3.2, use shiftleft instead.", FutureWarning)
    return shiftleft(col, numBits)
def shiftleft(col, numBits):
    """Shift the given value `numBits` left.

    .. versionadded:: 3.2.0

    Examples
    --------
    >>> spark.createDataFrame([(21,)], ['a']).select(shiftleft('a', 1).alias('r')).collect()
    [Row(r=42)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.shiftleft(_to_java_column(col), numBits)
    return Column(jc)
def shiftRight(col, numBits):
    """Deprecated alias of :func:`shiftright`; (signed) shifts the given
    value `numBits` right.

    .. versionadded:: 1.5.0

    .. deprecated:: 3.2.0
        Use :func:`shiftright` instead.
    """
    warnings.warn("Deprecated in 3.2, use shiftright instead.", FutureWarning)
    return shiftright(col, numBits)
def shiftright(col, numBits):
    """(Signed) shift the given value `numBits` right.

    .. versionadded:: 3.2.0

    Examples
    --------
    >>> spark.createDataFrame([(42,)], ['a']).select(shiftright('a', 1).alias('r')).collect()
    [Row(r=21)]
    """
    ctx = SparkContext._active_spark_context
    jcol = _to_java_column(col)
    return Column(ctx._jvm.functions.shiftRight(jcol, numBits))
def shiftRightUnsigned(col, numBits):
    """Deprecated alias of :func:`shiftrightunsigned`; unsigned shift of
    the given value `numBits` right.

    .. versionadded:: 1.5.0

    .. deprecated:: 3.2.0
        Use :func:`shiftrightunsigned` instead.
    """
    warnings.warn("Deprecated in 3.2, use shiftrightunsigned instead.", FutureWarning)
    return shiftrightunsigned(col, numBits)
def shiftrightunsigned(col, numBits):
    """Unsigned shift of the given value `numBits` right.

    .. versionadded:: 3.2.0

    Examples
    --------
    >>> df = spark.createDataFrame([(-42,)], ['a'])
    >>> df.select(shiftrightunsigned('a', 1).alias('r')).collect()
    [Row(r=9223372036854775787)]
    """
    ctx = SparkContext._active_spark_context
    jcol = _to_java_column(col)
    return Column(ctx._jvm.functions.shiftRightUnsigned(jcol, numBits))
def spark_partition_id():
    """A column holding the partition ID of each row.

    .. versionadded:: 1.6.0

    Notes
    -----
    Non-deterministic: depends on data partitioning and task scheduling.

    Examples
    --------
    >>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
    [Row(pid=0), Row(pid=0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.spark_partition_id()
    return Column(jc)
def expr(str):
    """Parses an expression string into the column it represents.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df.select(expr("length(name)")).collect()
    [Row(length(name)=5), Row(length(name)=3)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.expr(str)
    return Column(jc)
def struct(*cols):
    """Creates a new struct column.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    cols : list, set, str or :class:`~pyspark.sql.Column`
        column names or :class:`~pyspark.sql.Column`\\s to contain in the output struct.

    Examples
    --------
    >>> df.select(struct('age', 'name').alias("struct")).collect()
    [Row(struct=Row(age=2, name='Alice')), Row(struct=Row(age=5, name='Bob'))]
    >>> df.select(struct([df.age, df.name]).alias("struct")).collect()
    [Row(struct=Row(age=2, name='Alice')), Row(struct=Row(age=5, name='Bob'))]
    """
    # A single list/set argument is treated as the column collection itself.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.struct(_to_seq(ctx, cols, _to_java_column)))
def greatest(*cols):
    """Returns the greatest value of the given columns, skipping nulls.

    Requires at least 2 columns; returns null iff all inputs are null.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
    >>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
    [Row(greatest=4)]
    """
    if len(cols) < 2:
        raise ValueError("greatest should take at least two columns")
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.greatest(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
def least(*cols):
    """Returns the least value of the given columns, skipping nulls.

    Requires at least 2 columns; returns null iff all inputs are null.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
    >>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
    [Row(least=1)]
    """
    if len(cols) < 2:
        raise ValueError("least should take at least two columns")
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.least(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
def when(condition, value):
    """Evaluates a condition and returns one of multiple possible result
    expressions. If :func:`pyspark.sql.Column.otherwise` is not invoked,
    None is returned for unmatched conditions.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    condition : :class:`~pyspark.sql.Column`
        a boolean :class:`~pyspark.sql.Column` expression.
    value :
        a literal value, or a :class:`~pyspark.sql.Column` expression.

    >>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
    [Row(age=3), Row(age=4)]

    >>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
    [Row(age=3), Row(age=None)]
    """
    if not isinstance(condition, Column):
        raise TypeError("condition should be a Column")
    ctx = SparkContext._active_spark_context
    jvalue = value._jc if isinstance(value, Column) else value
    return Column(ctx._jvm.functions.when(condition._jc, jvalue))
def log(arg1, arg2=None):
    """Returns the first-argument-based logarithm of the second argument.

    With a single argument, returns its natural logarithm.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
    ['0.30102', '0.69897']

    >>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
    ['0.69314', '1.60943']
    """
    fns = SparkContext._active_spark_context._jvm.functions
    if arg2 is None:
        # Single-argument form: natural log of arg1.
        return Column(fns.log(_to_java_column(arg1)))
    return Column(fns.log(arg1, _to_java_column(arg2)))
def log2(col):
    """Returns the base-2 logarithm of the argument.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
    [Row(log2=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.log2(_to_java_column(col))
    return Column(jc)
def conv(col, fromBase, toBase):
    """Convert a number in a string column from one base to another.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([("010101",)], ['n'])
    >>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
    [Row(hex='15')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.conv(_to_java_column(col), fromBase, toBase)
    return Column(jc)
def factorial(col):
    """Computes the factorial of the given value.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([(5,)], ['n'])
    >>> df.select(factorial(df.n).alias('f')).collect()
    [Row(f=120)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.factorial(_to_java_column(col))
    return Column(jc)
# --------------- Window functions ------------------------
def lag(col, offset=1, default=None):
    """Window function: returns the value `offset` rows before the current
    row, or `default` when fewer than `offset` rows precede it. With an
    offset of one this yields the previous row at any point in the window
    partition — the SQL LAG function.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    offset : int, optional
        number of row to extend
    default : optional
        default value
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.lag(_to_java_column(col), offset, default)
    return Column(jc)
def lead(col, offset=1, default=None):
    """Window function: returns the value `offset` rows after the current
    row, or `default` when fewer than `offset` rows follow it. With an
    offset of one this yields the next row at any point in the window
    partition — the SQL LEAD function.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    offset : int, optional
        number of row to extend
    default : optional
        default value
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.lead(_to_java_column(col), offset, default)
    return Column(jc)
def nth_value(col, offset, ignoreNulls=False):
    """Window function: returns the value in the `offset`\\th row of the
    window frame (counting from 1), or null when the frame has fewer than
    `offset` rows. When `ignoreNulls` is true, it returns the `offset`\\th
    non-null value instead (null if all values are null) — the SQL
    nth_value function.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    offset : int, optional
        number of row to use as the value
    ignoreNulls : bool, optional
        indicates the Nth value should skip null in the
        determination of which row to use
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.nth_value(_to_java_column(col), offset, ignoreNulls)
    return Column(jc)
def ntile(n):
    """Window function: returns the ntile group id (from 1 to `n`
    inclusive) in an ordered window partition. For `n` = 4, the first
    quarter of rows gets 1, the second quarter 2, the third 3, and the
    last quarter 4 — the SQL NTILE function.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    n : int
        an integer
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.ntile(int(n))
    return Column(jc)
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
    """Returns the current date at the start of query evaluation as a
    :class:`DateType` column. Every call of current_date within the same
    query returns the same value.
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.current_date()
    return Column(jc)
def current_timestamp():
    """Returns the current timestamp at the start of query evaluation as a
    :class:`TimestampType` column. Every call of current_timestamp within
    the same query returns the same value.
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.current_timestamp()
    return Column(jc)
def date_format(date, format):
    """Converts a date/timestamp/string to a string in the format given by
    the second argument.

    A pattern such as `dd.MM.yyyy` could produce a string like
    '18.03.1993'. All pattern letters of `datetime pattern`_. can be used.

    .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

    .. versionadded:: 1.5.0

    Notes
    -----
    Whenever possible, use specialized functions like `year`.

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
    [Row(date='04/08/2015')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_format(_to_java_column(date), format)
    return Column(jc)
def year(col):
    """Extract the year of a given date as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(year('dt').alias('year')).collect()
    [Row(year=2015)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.year(_to_java_column(col))
    return Column(jc)
def quarter(col):
    """Extract the quarter of a given date as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(quarter('dt').alias('quarter')).collect()
    [Row(quarter=2)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.quarter(_to_java_column(col))
    return Column(jc)
def month(col):
    """Extract the month of a given date as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(month('dt').alias('month')).collect()
    [Row(month=4)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.month(_to_java_column(col))
    return Column(jc)
def dayofweek(col):
    """Extract the day of the week of a given date as an integer.

    .. versionadded:: 2.3.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofweek('dt').alias('day')).collect()
    [Row(day=4)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofweek(_to_java_column(col))
    return Column(jc)
def dayofmonth(col):
    """Extract the day of the month of a given date as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofmonth('dt').alias('day')).collect()
    [Row(day=8)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofmonth(_to_java_column(col))
    return Column(jc)
def dayofyear(col):
    """Extract the day of the year of a given date as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofyear('dt').alias('day')).collect()
    [Row(day=98)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofyear(_to_java_column(col))
    return Column(jc)
def hour(col):
    """Extract the hours of a given timestamp as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(hour('ts').alias('hour')).collect()
    [Row(hour=13)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.hour(_to_java_column(col))
    return Column(jc)
def minute(col):
    """Extract the minutes of a given timestamp as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(minute('ts').alias('minute')).collect()
    [Row(minute=8)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.minute(_to_java_column(col))
    return Column(jc)
def second(col):
    """Extract the seconds of a given timestamp as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(second('ts').alias('second')).collect()
    [Row(second=15)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.second(_to_java_column(col))
    return Column(jc)
def weekofyear(col):
    """Extract the week number of a given date as an integer.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(weekofyear(df.dt).alias('week')).collect()
    [Row(week=15)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.weekofyear(_to_java_column(col))
    return Column(jc)
def date_add(start, days):
    """Returns the date that is `days` days after `start`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
    [Row(next_date=datetime.date(2015, 4, 9))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_add(_to_java_column(start), days)
    return Column(jc)
def date_sub(start, days):
    """Returns the date that is `days` days before `start`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
    [Row(prev_date=datetime.date(2015, 4, 7))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_sub(_to_java_column(start), days)
    return Column(jc)
def datediff(end, start):
    """Returns the number of days from `start` to `end`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
    >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
    [Row(diff=32)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.datediff(_to_java_column(end), _to_java_column(start))
    return Column(jc)
def add_months(start, months):
    """Returns the date that is `months` months after `start`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
    [Row(next_month=datetime.date(2015, 5, 8))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.add_months(_to_java_column(start), months)
    return Column(jc)
def months_between(date1, date2, roundOff=True):
    """Returns the number of months between `date1` and `date2`.

    The result is positive when `date1` is later than `date2`. When both
    dates fall on the same day of month, or both are the last day of their
    month, an integer is returned (time of day is ignored). The result is
    rounded off to 8 digits unless `roundOff` is `False`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
    >>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
    [Row(months=3.94959677)]
    >>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
    [Row(months=3.9495967741935485)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.months_between(
        _to_java_column(date1), _to_java_column(date2), roundOff)
    return Column(jc)
def to_date(col, format=None):
    """Converts a :class:`~pyspark.sql.Column` into
    :class:`pyspark.sql.types.DateType` using the optionally specified
    format (see `datetime pattern`_). When the format is omitted, casting
    rules to :class:`pyspark.sql.types.DateType` apply — equivalent to
    ``col.cast("date")``.

    .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

    .. versionadded:: 2.2.0

    Examples
    --------
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_date(df.t).alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]

    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]
    """
    fns = SparkContext._active_spark_context._jvm.functions
    jcol = _to_java_column(col)
    jc = fns.to_date(jcol) if format is None else fns.to_date(jcol, format)
    return Column(jc)
def to_timestamp(col, format=None):
    """Converts a :class:`~pyspark.sql.Column` into
    :class:`pyspark.sql.types.TimestampType` using the optionally specified
    format (see `datetime pattern`_). When the format is omitted, casting
    rules to :class:`pyspark.sql.types.TimestampType` apply — equivalent to
    ``col.cast("timestamp")``.

    .. _datetime pattern: https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html

    .. versionadded:: 2.2.0

    Examples
    --------
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_timestamp(df.t).alias('dt')).collect()
    [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]

    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
    [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
    """
    fns = SparkContext._active_spark_context._jvm.functions
    jcol = _to_java_column(col)
    jc = fns.to_timestamp(jcol) if format is None else fns.to_timestamp(jcol, format)
    return Column(jc)
def trunc(date, format):
    """Returns a date truncated to the unit specified by the format.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    date : :class:`~pyspark.sql.Column` or str
    format : str
        'year', 'yyyy', 'yy' to truncate by year,
        or 'month', 'mon', 'mm' to truncate by month
        Other options are: 'week', 'quarter'

    Examples
    --------
    >>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
    >>> df.select(trunc(df.d, 'year').alias('year')).collect()
    [Row(year=datetime.date(1997, 1, 1))]
    >>> df.select(trunc(df.d, 'mon').alias('month')).collect()
    [Row(month=datetime.date(1997, 2, 1))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.trunc(_to_java_column(date), format)
    return Column(jc)
def date_trunc(format, timestamp):
    """Returns a timestamp truncated to the unit specified by the format.

    .. versionadded:: 2.3.0

    Parameters
    ----------
    format : str
        'year', 'yyyy', 'yy' to truncate by year,
        'month', 'mon', 'mm' to truncate by month,
        'day', 'dd' to truncate by day,
        Other options are:
        'microsecond', 'millisecond', 'second', 'minute', 'hour', 'week', 'quarter'
    timestamp : :class:`~pyspark.sql.Column` or str

    Examples
    --------
    >>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
    >>> df.select(date_trunc('year', df.t).alias('year')).collect()
    [Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
    >>> df.select(date_trunc('mon', df.t).alias('month')).collect()
    [Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_trunc(format, _to_java_column(timestamp))
    return Column(jc)
def next_day(date, dayOfWeek):
    """Returns the first date later than the value of the date column that
    falls on the given day of the week.

    The day-of-week parameter is case insensitive and accepts:
    "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
    >>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
    [Row(date=datetime.date(2015, 8, 2))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.next_day(_to_java_column(date), dayOfWeek)
    return Column(jc)
def last_day(date):
    """Returns the last day of the month that the given date belongs to.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
    >>> df.select(last_day(df.d).alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.last_day(_to_java_column(date))
    return Column(jc)
def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"):
    """Converts seconds since the unix epoch (1970-01-01 00:00:00 UTC) to a
    string representing that moment in the current system time zone, in the
    given format.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
    >>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
    >>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
    [Row(ts='2015-04-08 00:00:00')]
    >>> spark.conf.unset("spark.sql.session.timeZone")
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.from_unixtime(_to_java_column(timestamp), format)
    return Column(jc)
def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
    """Converts a time string with the given pattern
    ('yyyy-MM-dd HH:mm:ss' by default) to a Unix time stamp in seconds,
    using the default timezone and locale; returns null on failure.

    When `timestamp` is None, the current timestamp is returned.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
    >>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
    [Row(unix_time=1428476400)]
    >>> spark.conf.unset("spark.sql.session.timeZone")
    """
    fns = SparkContext._active_spark_context._jvm.functions
    if timestamp is None:
        # No argument: current timestamp.
        return Column(fns.unix_timestamp())
    return Column(fns.unix_timestamp(_to_java_column(timestamp), format))
def from_utc_timestamp(timestamp, tz):
    """A common function for databases supporting TIMESTAMP WITHOUT
    TIMEZONE: takes a timezone-agnostic timestamp, interprets it as a
    timestamp in UTC, and renders it as a timestamp in the given time zone.

    A timestamp in Spark, however, is a number of microseconds from the
    Unix epoch and is not timezone-agnostic, so in Spark this function just
    shifts the timestamp value from the UTC timezone to the given timezone.

    The result may be confusing when the input is a string carrying a
    timezone, e.g. '2018-03-13T06:18:23+00:00': Spark first casts the
    string to a timestamp using the timezone in the string, then displays
    the result by converting the timestamp back to a string using the
    session local timezone.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    timestamp : :class:`~pyspark.sql.Column` or str
        the column that contains timestamps
    tz : :class:`~pyspark.sql.Column` or str
        A string detailing the time zone ID that the input should be adjusted to. It should
        be in the format of either region-based zone IDs or zone offsets. Region IDs must
        have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in
        the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are
        supported as aliases of '+00:00'. Other short names are not recommended to use
        because they can be ambiguous.

        .. versionchanged:: 2.4
           `tz` can take a :class:`~pyspark.sql.Column` containing timezone ID strings.

    Examples
    --------
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
    >>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
    [Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
    >>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
    [Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
    """
    ctx = SparkContext._active_spark_context
    # A Column tz must be converted; a plain string is passed through as-is.
    jtz = _to_java_column(tz) if isinstance(tz, Column) else tz
    return Column(ctx._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), jtz))
def to_utc_timestamp(timestamp, tz):
    """A common function for databases supporting TIMESTAMP WITHOUT
    TIMEZONE: takes a timezone-agnostic timestamp, interprets it as a
    timestamp in the given timezone, and renders it as a timestamp in UTC.

    A timestamp in Spark, however, is a number of microseconds from the
    Unix epoch and is not timezone-agnostic, so in Spark this function just
    shifts the timestamp value from the given timezone to the UTC timezone.

    The result may be confusing when the input is a string carrying a
    timezone, e.g. '2018-03-13T06:18:23+00:00': Spark first casts the
    string to a timestamp using the timezone in the string, then displays
    the result by converting the timestamp back to a string using the
    session local timezone.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    timestamp : :class:`~pyspark.sql.Column` or str
        the column that contains timestamps
    tz : :class:`~pyspark.sql.Column` or str
        A string detailing the time zone ID that the input should be adjusted to. It should
        be in the format of either region-based zone IDs or zone offsets. Region IDs must
        have the form 'area/city', such as 'America/Los_Angeles'. Zone offsets must be in
        the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. Also 'UTC' and 'Z' are
        supported as aliases of '+00:00'. Other short names are not recommended to use
        because they can be ambiguous.

        .. versionchanged:: 2.4.0
           `tz` can take a :class:`~pyspark.sql.Column` containing timezone ID strings.

    Examples
    --------
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
    >>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
    [Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
    >>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
    [Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
    """
    ctx = SparkContext._active_spark_context
    # A Column tz must be converted; a plain string is passed through as-is.
    jtz = _to_java_column(tz) if isinstance(tz, Column) else tz
    return Column(ctx._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), jtz))
def timestamp_seconds(col):
    """
    Converts a number of seconds since the Unix epoch (1970-01-01 00:00:00 UTC)
    into a timestamp column.

    .. versionadded:: 3.1.0

    Examples
    --------
    >>> from pyspark.sql.functions import timestamp_seconds
    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
    >>> time_df = spark.createDataFrame([(1230219000,)], ['unix_time'])
    >>> time_df.select(timestamp_seconds(time_df.unix_time).alias('ts')).show()
    +-------------------+
    | ts|
    +-------------------+
    |2008-12-25 07:30:00|
    +-------------------+
    >>> spark.conf.unset("spark.sql.session.timeZone")
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.timestamp_seconds(_to_java_column(col))
    return Column(jc)
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
    """Bucketize rows into one or more time windows given a timestamp specifying column. Window
    starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
    [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
    the order of months are not supported.

    The time column must be of :class:`pyspark.sql.types.TimestampType`.

    Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
    interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
    If ``slideDuration`` is omitted, the windows are tumbling windows.

    The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
    window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
    past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.

    The output column will be a struct called 'window' by default with the nested columns 'start'
    and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.

    .. versionadded:: 2.0.0

    Parameters
    ----------
    timeColumn : :class:`~pyspark.sql.Column`
        The column or the expression to use as the timestamp for windowing by time.
        The time column must be of TimestampType or TimestampNTZType.
    windowDuration : str
        A string specifying the width of the window, e.g. `10 minutes`,
        `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for
        valid duration identifiers. Note that the duration is a fixed length of
        time, and does not vary over time according to a calendar. For example,
        `1 day` always means 86,400,000 milliseconds, not a calendar day.
    slideDuration : str, optional
        A new window will be generated every `slideDuration`. Must be less than
        or equal to the `windowDuration`. Check
        `org.apache.spark.unsafe.types.CalendarInterval` for valid duration
        identifiers. This duration is likewise absolute, and does not vary
        according to a calendar.
    startTime : str, optional
        The offset with respect to 1970-01-01 00:00:00 UTC with which to start
        window intervals. For example, in order to have hourly tumbling windows that
        start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide
        `startTime` as `15 minutes`.

    Examples
    --------
    >>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
    >>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
    >>> w.select(w.window.start.cast("string").alias("start"),
    ...          w.window.end.cast("string").alias("end"), "sum").collect()
    [Row(start='2016-03-11 09:00:05', end='2016-03-11 09:00:10', sum=1)]
    """
    def _require_string(value, name):
        # Durations/offsets must be non-empty plain strings; anything else is a caller error.
        if not value or type(value) is not str:
            raise TypeError("%s should be provided as a string" % name)

    sc = SparkContext._active_spark_context
    time_col = _to_java_column(timeColumn)
    _require_string(windowDuration, "windowDuration")
    jfuncs = sc._jvm.functions
    if slideDuration and startTime:
        _require_string(slideDuration, "slideDuration")
        _require_string(startTime, "startTime")
        res = jfuncs.window(time_col, windowDuration, slideDuration, startTime)
    elif slideDuration:
        _require_string(slideDuration, "slideDuration")
        res = jfuncs.window(time_col, windowDuration, slideDuration)
    elif startTime:
        # Tumbling window with an offset: slide equals the window width.
        _require_string(startTime, "startTime")
        res = jfuncs.window(time_col, windowDuration, windowDuration, startTime)
    else:
        res = jfuncs.window(time_col, windowDuration)
    return Column(res)
def session_window(timeColumn, gapDuration):
    """
    Generates session window given a timestamp specifying column.

    Session window is one of dynamic windows, which means the length of window is varying
    according to the given inputs. The length of session window is defined as "the timestamp
    of latest input of the session + gap duration", so when the new inputs are bound to the
    current session window, the end time of session window can be expanded according to the new
    inputs.

    Windows can support microsecond precision. Windows in the order of months are not supported.
    For a streaming query, you may use the function `current_timestamp` to generate windows on
    processing time.

    gapDuration is provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
    interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
    It could also be a Column which can be evaluated to gap duration dynamically based on the
    input row.

    The output column will be a struct called 'session_window' by default with the nested columns
    'start' and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.

    .. versionadded:: 3.2.0

    Parameters
    ----------
    timeColumn : :class:`~pyspark.sql.Column`
        The column or the expression to use as the timestamp for windowing by time.
        The time column must be of TimestampType.
    gapDuration : :class:`~pyspark.sql.Column` or str
        A column or string specifying the timeout of the session. It could be static value,
        e.g. `10 minutes`, `1 second`, or an expression/UDF that specifies gap
        duration dynamically based on the input row.

    Examples
    --------
    >>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
    >>> w = df.groupBy(session_window("date", "5 seconds")).agg(sum("val").alias("sum"))
    >>> w.select(w.session_window.start.cast("string").alias("start"),
    ...          w.session_window.end.cast("string").alias("end"), "sum").collect()
    [Row(start='2016-03-11 09:00:07', end='2016-03-11 09:00:12', sum=1)]
    >>> w = df.groupBy(session_window("date", lit("5 seconds"))).agg(sum("val").alias("sum"))
    >>> w.select(w.session_window.start.cast("string").alias("start"),
    ...          w.session_window.end.cast("string").alias("end"), "sum").collect()
    [Row(start='2016-03-11 09:00:07', end='2016-03-11 09:00:12', sum=1)]
    """
    def _require_string_or_column(value, name):
        if value is None or not isinstance(value, (str, Column)):
            raise TypeError("%s should be provided as a string or Column" % name)

    sc = SparkContext._active_spark_context
    time_col = _to_java_column(timeColumn)
    _require_string_or_column(gapDuration, "gapDuration")
    # A literal string is forwarded as-is; a Column is converted to its Java counterpart.
    if isinstance(gapDuration, str):
        gap = gapDuration
    else:
        gap = _to_java_column(gapDuration)
    return Column(sc._jvm.functions.session_window(time_col, gap))
# ---------------------------- misc functions ----------------------------------
def crc32(col):
    """
    Computes the cyclic redundancy check value (CRC32) of a binary column and
    returns it as a bigint.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
    [Row(crc32=2743272264)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.crc32(_to_java_column(col))
    return Column(jc)
def md5(col):
    """Computes the MD5 digest of the column and returns it as a 32 character hex string.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
    [Row(hash='902fbdd2b1df0c4f70b4a5d23525e932')]
    """
    jvm_funcs = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_funcs.md5(_to_java_column(col)))
def sha1(col):
    """Computes the SHA-1 digest of the column and returns it as a hex string.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
    [Row(hash='3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
    """
    jvm_funcs = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_funcs.sha1(_to_java_column(col)))
def sha2(col, numBits):
    """Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
    and SHA-512). The numBits indicates the desired bit length of the result, which must have a
    value of 224, 256, 384, 512, or 0 (which is equivalent to 256).

    .. versionadded:: 1.5.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        target column to compute the digest on.
    numBits : int
        the desired bit length of the result; one of 224, 256, 384, 512, or 0
        (0 is treated as 256).

    Raises
    ------
    ValueError
        If `numBits` is not one of the supported bit lengths.

    Examples
    --------
    >>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
    >>> digests[0]
    Row(s='3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
    >>> digests[1]
    Row(s='cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
    """
    # Fail fast on the Python side with a clear message instead of surfacing
    # an opaque JVM IllegalArgumentException at execution time.
    if numBits not in {0, 224, 256, 384, 512}:
        raise ValueError("numBits should be one of {224, 256, 384, 512, 0}, got %r" % (numBits,))
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
    return Column(jc)
def hash(*cols):
    """Calculates the hash code of given columns, and returns the result as an int column.

    .. versionadded:: 2.0.0

    Examples
    --------
    >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
    [Row(hash=-757602832)]
    """
    ctx = SparkContext._active_spark_context
    jcols = _to_seq(ctx, cols, _to_java_column)
    return Column(ctx._jvm.functions.hash(jcols))
def xxhash64(*cols):
    """Calculates the hash code of given columns using the 64-bit variant of the xxHash
    algorithm, and returns the result as a long column.

    .. versionadded:: 3.0.0

    Examples
    --------
    >>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect()
    [Row(hash=4105715581806190027)]
    """
    ctx = SparkContext._active_spark_context
    jcols = _to_seq(ctx, cols, _to_java_column)
    return Column(ctx._jvm.functions.xxhash64(jcols))
def assert_true(col, errMsg=None):
    """
    Returns null if the input column is true; throws an exception with the provided error
    message otherwise.

    .. versionadded:: 3.1.0

    Examples
    --------
    >>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
    >>> df.select(assert_true(df.a < df.b).alias('r')).collect()
    [Row(r=None)]
    >>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
    >>> df.select(assert_true(df.a < df.b, df.a).alias('r')).collect()
    [Row(r=None)]
    >>> df = spark.createDataFrame([(0,1)], ['a', 'b'])
    >>> df.select(assert_true(df.a < df.b, 'error').alias('r')).collect()
    [Row(r=None)]
    """
    sc = SparkContext._active_spark_context
    # Without a message, defer to the JVM's default error text.
    if errMsg is None:
        return Column(sc._jvm.functions.assert_true(_to_java_column(col)))
    if not isinstance(errMsg, (str, Column)):
        raise TypeError(
            "errMsg should be a Column or a str, got {}".format(type(errMsg))
        )
    if isinstance(errMsg, str):
        msg = _create_column_from_literal(errMsg)
    else:
        msg = _to_java_column(errMsg)
    return Column(sc._jvm.functions.assert_true(_to_java_column(col), msg))
@since(3.1)
def raise_error(errMsg):
    """
    Throws an exception with the provided error message.
    """
    if not isinstance(errMsg, (str, Column)):
        raise TypeError(
            "errMsg should be a Column or a str, got {}".format(type(errMsg))
        )
    ctx = SparkContext._active_spark_context
    if isinstance(errMsg, str):
        msg = _create_column_from_literal(errMsg)
    else:
        msg = _to_java_column(errMsg)
    return Column(ctx._jvm.functions.raise_error(msg))
# ---------------------- String/Binary functions ------------------------------
@since(1.5)
def upper(col):
    """Converts a string expression to upper case."""
    jc = _invoke_function_over_column("upper", col)
    return jc
@since(1.5)
def lower(col):
    """Converts a string expression to lower case."""
    jc = _invoke_function_over_column("lower", col)
    return jc
@since(1.5)
def ascii(col):
    """Computes the numeric value of the first character of the string column."""
    jc = _invoke_function_over_column("ascii", col)
    return jc
@since(1.5)
def base64(col):
    """Computes the BASE64 encoding of a binary column and returns it as a string column."""
    jc = _invoke_function_over_column("base64", col)
    return jc
@since(1.5)
def unbase64(col):
    """Decodes a BASE64 encoded string column and returns it as a binary column."""
    jc = _invoke_function_over_column("unbase64", col)
    return jc
@since(1.5)
def ltrim(col):
    """Trim the spaces from left end for the specified string value."""
    jc = _invoke_function_over_column("ltrim", col)
    return jc
@since(1.5)
def rtrim(col):
    """Trim the spaces from right end for the specified string value."""
    jc = _invoke_function_over_column("rtrim", col)
    return jc
@since(1.5)
def trim(col):
    """Trim the spaces from both ends for the specified string column."""
    jc = _invoke_function_over_column("trim", col)
    return jc
def concat_ws(sep, *cols):
    """
    Concatenates multiple input string columns together into a single string column,
    using the given separator.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
    [Row(s='abcd-123')]
    """
    ctx = SparkContext._active_spark_context
    jcols = _to_seq(ctx, cols, _to_java_column)
    return Column(ctx._jvm.functions.concat_ws(sep, jcols))
@since(1.5)
def decode(col, charset):
    """
    Computes the first argument into a string from a binary using the provided character set
    (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
    """
    Computes the first argument into a binary from a string using the provided character set
    (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.encode(_to_java_column(col), charset))
def format_number(col, d):
    """
    Formats the number X to a format like '#,###,###.##', rounded to `d` decimal places
    with HALF_EVEN round mode, and returns the result as a string.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        the column name of the numeric value to be formatted
    d : int
        the N decimal places

    >>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
    [Row(v='5.0000')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.format_number(_to_java_column(col), d)
    return Column(jc)
def format_string(format, *cols):
    """
    Formats the arguments in printf-style and returns the result as a string column.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    format : str
        string that can contain embedded format tags and used as result column's value
    cols : :class:`~pyspark.sql.Column` or str
        column names or :class:`~pyspark.sql.Column`\\s to be used in formatting

    Examples
    --------
    >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
    >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
    [Row(v='5 hello')]
    """
    ctx = SparkContext._active_spark_context
    jcols = _to_seq(ctx, cols, _to_java_column)
    return Column(ctx._jvm.functions.format_string(format, jcols))
def instr(str, substr):
    """
    Locate the position of the first occurrence of substr column in the given string.
    Returns null if either of the arguments are null.

    .. versionadded:: 1.5.0

    Notes
    -----
    The position is not zero based, but 1 based index. Returns 0 if substr
    could not be found in str.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(instr(df.s, 'b').alias('s')).collect()
    [Row(s=2)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.instr(_to_java_column(str), substr)
    return Column(jc)
def overlay(src, replace, pos, len=-1):
    """
    Overlay the specified portion of `src` with `replace`,
    starting from byte position `pos` of `src` and proceeding for `len` bytes.

    .. versionadded:: 3.0.0

    Examples
    --------
    >>> df = spark.createDataFrame([("SPARK_SQL", "CORE")], ("x", "y"))
    >>> df.select(overlay("x", "y", 7).alias("overlayed")).show()
    +----------+
    | overlayed|
    +----------+
    |SPARK_CORE|
    +----------+
    """
    # Validate the Python-side arguments before touching the JVM.
    if not isinstance(pos, (int, str, Column)):
        raise TypeError(
            "pos should be an integer or a Column / column name, got {}".format(type(pos)))
    if len is not None and not isinstance(len, (int, str, Column)):
        raise TypeError(
            "len should be an integer or a Column / column name, got {}".format(type(len)))

    # Plain ints become literal columns; strings/Columns resolve to Java columns.
    jpos = _create_column_from_literal(pos) if isinstance(pos, int) else _to_java_column(pos)
    jlen = _create_column_from_literal(len) if isinstance(len, int) else _to_java_column(len)
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.overlay(
        _to_java_column(src),
        _to_java_column(replace),
        jpos,
        jlen
    )
    return Column(jc)
def sentences(string, language=None, country=None):
    """
    Splits a string into arrays of sentences, where each sentence is an array of words.
    The 'language' and 'country' arguments are optional, and if omitted, the default
    locale is used.

    .. versionadded:: 3.2.0

    Parameters
    ----------
    string : :class:`~pyspark.sql.Column` or str
        a string to be split
    language : :class:`~pyspark.sql.Column` or str, optional
        a language of the locale
    country : :class:`~pyspark.sql.Column` or str, optional
        a country of the locale

    Examples
    --------
    >>> df = spark.createDataFrame([["This is an example sentence."]], ["string"])
    >>> df.select(sentences(df.string, lit("en"), lit("US"))).show(truncate=False)
    +-----------------------------------+
    |sentences(string, en, US) |
    +-----------------------------------+
    |[[This, is, an, example, sentence]]|
    +-----------------------------------+
    """
    # Missing locale parts default to empty-string literals (JVM default locale).
    lang_arg = lit("") if language is None else language
    country_arg = lit("") if country is None else country
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.sentences(
        _to_java_column(string),
        _to_java_column(lang_arg),
        _to_java_column(country_arg)
    )
    return Column(jc)
def substring(str, pos, len):
    """
    Substring starts at `pos` and is of length `len` when str is String type or
    returns the slice of byte array that starts at `pos` in byte and is of length `len`
    when str is Binary type.

    .. versionadded:: 1.5.0

    Notes
    -----
    The position is not zero based, but 1 based index.

    Examples
    --------
    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(substring(df.s, 1, 2).alias('s')).collect()
    [Row(s='ab')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.substring(_to_java_column(str), pos, len)
    return Column(jc)
def substring_index(str, delim, count):
    """
    Returns the substring from string str before count occurrences of the delimiter delim.
    If count is positive, everything to the left of the final delimiter (counting from the
    left) is returned. If count is negative, everything to the right of the final delimiter
    (counting from the right) is returned. substring_index performs a case-sensitive match
    when searching for delim.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
    >>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
    [Row(s='a.b')]
    >>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
    [Row(s='b.c.d')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.substring_index(_to_java_column(str), delim, count)
    return Column(jc)
def levenshtein(left, right):
    """Computes the Levenshtein distance of the two given strings.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
    >>> df0.select(levenshtein('l', 'r').alias('d')).collect()
    [Row(d=3)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right)))
def locate(substr, str, pos=1):
    """
    Locate the position of the first occurrence of substr in a string column, after position pos.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    substr : str
        a string
    str : :class:`~pyspark.sql.Column` or str
        a Column of :class:`pyspark.sql.types.StringType`
    pos : int, optional
        start position (1-based index)

    Notes
    -----
    The position is not zero based, but 1 based index. Returns 0 if substr
    could not be found in str.

    Examples
    --------
    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(locate('b', df.s, 1).alias('s')).collect()
    [Row(s=2)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.locate(substr, _to_java_column(str), pos)
    return Column(jc)
def lpad(col, len, pad):
    """
    Left-pad the string column to width `len` with `pad`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
    [Row(s='##abcd')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.lpad(_to_java_column(col), len, pad))
def rpad(col, len, pad):
    """
    Right-pad the string column to width `len` with `pad`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
    [Row(s='abcd##')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.rpad(_to_java_column(col), len, pad))
def repeat(col, n):
    """
    Repeats a string column n times, and returns it as a new string column.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('ab',)], ['s',])
    >>> df.select(repeat(df.s, 3).alias('s')).collect()
    [Row(s='ababab')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.repeat(_to_java_column(col), n))
def split(str, pattern, limit=-1):
    """
    Splits str around matches of the given pattern.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    str : :class:`~pyspark.sql.Column` or str
        a string expression to split
    pattern : str
        a string representing a regular expression. The regex string should be
        a Java regular expression.
    limit : int, optional
        an integer which controls the number of times `pattern` is applied.

        * ``limit > 0``: The resulting array's length will not be more than `limit`, and the
                         resulting array's last entry will contain all input beyond the last
                         matched pattern.
        * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
                          array can be of any size.

        .. versionchanged:: 3.0
           `split` now takes an optional `limit` field. If not provided, default limit value is -1.

    Examples
    --------
    >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
    >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
    [Row(s=['one', 'twoBthreeC'])]
    >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
    [Row(s=['one', 'two', 'three', ''])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.split(_to_java_column(str), pattern, limit)
    return Column(jc)
def regexp_extract(str, pattern, idx):
    r"""Extract a specific group matched by a Java regex, from the specified string column.
    If the regex did not match, or the specified group did not match, an empty string is returned.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
    [Row(d='100')]
    >>> df = spark.createDataFrame([('foo',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
    [Row(d='')]
    >>> df = spark.createDataFrame([('aaaac',)], ['str'])
    >>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
    [Row(d='')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx))
def regexp_replace(str, pattern, replacement):
    r"""Replace all substrings of the specified string value that match regexp with rep.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
    [Row(d='-----')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement))
def initcap(col):
    """Translate the first letter of each word to upper case in the sentence.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
    [Row(v='Ab Cd')]
    """
    jvm_funcs = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_funcs.initcap(_to_java_column(col)))
def soundex(col):
    """
    Returns the SoundEx encoding for a string

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
    >>> df.select(soundex(df.name).alias("soundex")).collect()
    [Row(soundex='P362'), Row(soundex='U612')]
    """
    jvm_funcs = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_funcs.soundex(_to_java_column(col)))
def bin(col):
    """Returns the string representation of the binary value of the given column.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df.select(bin(df.age).alias('c')).collect()
    [Row(c='10'), Row(c='101')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.bin(_to_java_column(col))
    return Column(jc)
def hex(col):
    """Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
    :class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
    :class:`pyspark.sql.types.LongType`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
    [Row(hex(a)='414243', hex(b)='3')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.hex(_to_java_column(col))
    return Column(jc)
def unhex(col):
    """Inverse of hex. Interprets each pair of characters as a hexadecimal number
    and converts to the byte representation of number.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
    [Row(unhex(a)=bytearray(b'ABC'))]
    """
    jvm_funcs = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_funcs.unhex(_to_java_column(col)))
def length(col):
    """Computes the character length of string data or number of bytes of binary data.
    The length of character data includes the trailing spaces. The length of binary data
    includes binary zeros.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
    [Row(length=4)]
    """
    jvm_funcs = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_funcs.length(_to_java_column(col)))
def translate(srcCol, matching, replace):
    """A function translate any character in the `srcCol` by a character in `matching`.
    The characters in `replace` correspond positionally to the characters in `matching`.
    The translate will happen whenever any character in the string matches a character
    in `matching`.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
    ...     .alias('r')).collect()
    [Row(r='1a2s3ae')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.translate(_to_java_column(srcCol), matching, replace)
    return Column(jc)
# ---------------------- Collection functions ------------------------------
def create_map(*cols):
    """Creates a new map column.

    .. versionadded:: 2.0.0

    Parameters
    ----------
    cols : :class:`~pyspark.sql.Column` or str
        column names or :class:`~pyspark.sql.Column`\\s that are
        grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).

    Examples
    --------
    >>> df.select(create_map('name', 'age').alias("map")).collect()
    [Row(map={'Alice': 2}), Row(map={'Bob': 5})]
    >>> df.select(create_map([df.name, df.age]).alias("map")).collect()
    [Row(map={'Alice': 2}), Row(map={'Bob': 5})]
    """
    ctx = SparkContext._active_spark_context
    # A single list/set argument is treated as the full sequence of columns.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.map(_to_seq(ctx, cols, _to_java_column)))
def map_from_arrays(col1, col2):
    """Creates a new map from two arrays.

    .. versionadded:: 2.4.0

    Parameters
    ----------
    col1 : :class:`~pyspark.sql.Column` or str
        name of column containing a set of keys. All elements should not be null
    col2 : :class:`~pyspark.sql.Column` or str
        name of column containing a set of values

    Examples
    --------
    >>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
    >>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
    +----------------+
    | map|
    +----------------+
    |{2 -> a, 5 -> b}|
    +----------------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
def array(*cols):
    """Creates a new array column.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    cols : :class:`~pyspark.sql.Column` or str
        column names or :class:`~pyspark.sql.Column`\\s that have
        the same data type.

    Examples
    --------
    >>> df.select(array('age', 'age').alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    >>> df.select(array([df.age, df.age]).alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    """
    ctx = SparkContext._active_spark_context
    # A single list/set argument is treated as the full sequence of columns.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.array(_to_seq(ctx, cols, _to_java_column)))
def array_contains(col, value):
    """
    Collection function: returns null if the array is null, true if the array contains the
    given value, and false otherwise.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column containing array
    value :
        value or column to check for in array

    Examples
    --------
    >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
    >>> df.select(array_contains(df.data, "a")).collect()
    [Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
    >>> df.select(array_contains(df.data, lit("a"))).collect()
    [Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
    """
    ctx = SparkContext._active_spark_context
    jvalue = value._jc if isinstance(value, Column) else value
    return Column(ctx._jvm.functions.array_contains(_to_java_column(col), jvalue))
def arrays_overlap(a1, a2):
    """
    Collection function: returns true if the arrays contain any common non-null element; if not,
    returns null if both the arrays are non-empty and any of them contains a null element; returns
    false otherwise.

    .. versionadded:: 2.4.0

    Examples
    --------
    >>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
    >>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
    [Row(overlap=True), Row(overlap=False)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2))
    return Column(jc)
def slice(x, start, length):
    """
    Collection function: returns an array containing all the elements in `x` from index `start`
    (array indices start at 1, or from the end if `start` is negative) with the specified `length`.

    .. versionadded:: 2.4.0

    Parameters
    ----------
    x : :class:`~pyspark.sql.Column` or str
        the array to be sliced
    start : :class:`~pyspark.sql.Column` or int
        the starting index
    length : :class:`~pyspark.sql.Column` or int
        the length of the slice

    Examples
    --------
    >>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
    >>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
    [Row(sliced=[2, 3]), Row(sliced=[5])]
    """
    # Columns go through as Java columns; plain ints are passed verbatim.
    jstart = start._jc if isinstance(start, Column) else start
    jlength = length._jc if isinstance(length, Column) else length
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.slice(_to_java_column(x), jstart, jlength))
def array_join(col, delimiter, null_replacement=None):
    """
    Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
    `null_replacement` if set, otherwise they are ignored.

    .. versionadded:: 2.4.0

    Examples
    --------
    >>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
    >>> df.select(array_join(df.data, ",").alias("joined")).collect()
    [Row(joined='a,b,c'), Row(joined='a')]
    >>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
    [Row(joined='a,b,c'), Row(joined='a,NULL')]
    """
    ctx = SparkContext._active_spark_context
    if null_replacement is None:
        jc = ctx._jvm.functions.array_join(_to_java_column(col), delimiter)
    else:
        jc = ctx._jvm.functions.array_join(_to_java_column(col), delimiter, null_replacement)
    return Column(jc)
def concat(*cols):
    """
    Concatenates multiple input columns together into a single column.
    The function works with strings, binary and compatible array columns.

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat(df.s, df.d).alias('s')).collect()
    [Row(s='abcd123')]
    >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
    >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
    [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
    """
    ctx = SparkContext._active_spark_context
    jcols = _to_seq(ctx, cols, _to_java_column)
    return Column(ctx._jvm.functions.concat(jcols))
def array_position(col, value):
    """
    Collection function: Locates the position of the first occurrence of the given value
    in the given array. Returns null if either of the arguments are null.
    .. versionadded:: 2.4.0
    Notes
    -----
    The position is not zero based, but 1 based index. Returns 0 if the given
    value could not be found in the array.
    Examples
    --------
    >>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
    >>> df.select(array_position(df.data, "a")).collect()
    [Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_position(_to_java_column(col), value)
    return Column(jc)
def element_at(col, extraction):
    """
    Collection function: Returns element of array at given index in extraction if col is array.
    Returns value for the given key in extraction if col is map.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column containing array or map
    extraction :
        index to check for in array or key to check for in map
    Notes
    -----
    The position is not zero based, but 1 based index.
    Examples
    --------
    >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
    >>> df.select(element_at(df.data, 1)).collect()
    [Row(element_at(data, 1)='a'), Row(element_at(data, 1)=None)]
    >>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
    >>> df.select(element_at(df.data, lit("a"))).collect()
    [Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
    """
    ctx = SparkContext._active_spark_context
    # Wrap the extraction (index or map key) in a literal column for the JVM call.
    jindex = lit(extraction)._jc
    return Column(ctx._jvm.functions.element_at(_to_java_column(col), jindex))
def array_remove(col, element):
    """
    Collection function: Remove all elements that equal to element from the given array.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column containing array
    element :
        element to be removed from the array
    Examples
    --------
    >>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
    >>> df.select(array_remove(df.data, 1)).collect()
    [Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_remove(_to_java_column(col), element)
    return Column(jc)
def array_distinct(col):
    """
    Collection function: removes duplicate values from the array.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
    >>> df.select(array_distinct(df.data)).collect()
    [Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_distinct(_to_java_column(col))
    return Column(jc)
def array_intersect(col1, col2):
    """
    Collection function: returns an array of the elements in the intersection of col1 and col2,
    without duplicates.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col1 : :class:`~pyspark.sql.Column` or str
        name of column containing array
    col2 : :class:`~pyspark.sql.Column` or str
        name of column containing array
    Examples
    --------
    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_intersect(df.c1, df.c2)).collect()
    [Row(array_intersect(c1, c2)=['a', 'c'])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
def array_union(col1, col2):
    """
    Collection function: returns an array of the elements in the union of col1 and col2,
    without duplicates.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col1 : :class:`~pyspark.sql.Column` or str
        name of column containing array
    col2 : :class:`~pyspark.sql.Column` or str
        name of column containing array
    Examples
    --------
    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_union(df.c1, df.c2)).collect()
    [Row(array_union(c1, c2)=['b', 'a', 'c', 'd', 'f'])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
def array_except(col1, col2):
    """
    Collection function: returns an array of the elements in col1 but not in col2,
    without duplicates.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col1 : :class:`~pyspark.sql.Column` or str
        name of column containing array
    col2 : :class:`~pyspark.sql.Column` or str
        name of column containing array
    Examples
    --------
    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_except(df.c1, df.c2)).collect()
    [Row(array_except(c1, c2)=['b'])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
def explode(col):
    """
    Returns a new row for each element in the given array or map.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.
    .. versionadded:: 1.4.0
    Examples
    --------
    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]
    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    +---+-----+
    |key|value|
    +---+-----+
    |  a|    b|
    +---+-----+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.explode(_to_java_column(col)))
def posexplode(col):
    """
    Returns a new row for each element with position in the given array or map.
    Uses the default column name `pos` for position, and `col` for elements in the
    array and `key` and `value` for elements in the map unless specified otherwise.
    .. versionadded:: 2.1.0
    Examples
    --------
    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(posexplode(eDF.intlist)).collect()
    [Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
    >>> eDF.select(posexplode(eDF.mapfield)).show()
    +---+---+-----+
    |pos|key|value|
    +---+---+-----+
    |  0|  a|    b|
    +---+---+-----+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.posexplode(_to_java_column(col)))
def explode_outer(col):
    """
    Returns a new row for each element in the given array or map.
    Unlike explode, if the array/map is null or empty then null is produced.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.
    .. versionadded:: 2.3.0
    Examples
    --------
    >>> df = spark.createDataFrame(
    ...     [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
    ...     ("id", "an_array", "a_map")
    ... )
    >>> df.select("id", "an_array", explode_outer("a_map")).show()
    +---+----------+----+-----+
    | id|  an_array| key|value|
    +---+----------+----+-----+
    |  1|[foo, bar]|   x|  1.0|
    |  2|        []|null| null|
    |  3|      null|null| null|
    +---+----------+----+-----+
    >>> df.select("id", "a_map", explode_outer("an_array")).show()
    +---+----------+----+
    | id|     a_map| col|
    +---+----------+----+
    |  1|{x -> 1.0}| foo|
    |  1|{x -> 1.0}| bar|
    |  2|        {}|null|
    |  3|      null|null|
    +---+----------+----+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.explode_outer(_to_java_column(col)))
def posexplode_outer(col):
    """
    Returns a new row for each element with position in the given array or map.
    Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
    Uses the default column name `pos` for position, and `col` for elements in the
    array and `key` and `value` for elements in the map unless specified otherwise.
    .. versionadded:: 2.3.0
    Examples
    --------
    >>> df = spark.createDataFrame(
    ...     [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
    ...     ("id", "an_array", "a_map")
    ... )
    >>> df.select("id", "an_array", posexplode_outer("a_map")).show()
    +---+----------+----+----+-----+
    | id|  an_array| pos| key|value|
    +---+----------+----+----+-----+
    |  1|[foo, bar]|   0|   x|  1.0|
    |  2|        []|null|null| null|
    |  3|      null|null|null| null|
    +---+----------+----+----+-----+
    >>> df.select("id", "a_map", posexplode_outer("an_array")).show()
    +---+----------+----+----+
    | id|     a_map| pos| col|
    +---+----------+----+----+
    |  1|{x -> 1.0}|   0| foo|
    |  1|{x -> 1.0}|   1| bar|
    |  2|        {}|null|null|
    |  3|      null|null|null|
    +---+----------+----+----+
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.posexplode_outer(_to_java_column(col)))
def get_json_object(col, path):
    """
    Extracts json object from a json string based on json path specified, and returns json string
    of the extracted json object. It will return null if the input json string is invalid.
    .. versionadded:: 1.6.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        string column in json format
    path : str
        path to the json object to extract
    Examples
    --------
    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
    ...                   get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
    [Row(key='1', c0='value1', c1='value2'), Row(key='2', c0='value12', c1=None)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.get_json_object(_to_java_column(col), path))
def json_tuple(col, *fields):
    """Creates a new row for a json column according to the given field names.
    .. versionadded:: 1.6.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        string column in json format
    fields : str
        fields to extract
    Examples
    --------
    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
    [Row(key='1', c0='value1', c1='value2'), Row(key='2', c0='value12', c1=None)]
    """
    ctx = SparkContext._active_spark_context
    # Field names are plain strings, so no per-element converter is needed.
    return Column(ctx._jvm.functions.json_tuple(_to_java_column(col), _to_seq(ctx, fields)))
def from_json(col, schema, options=None):
    """
    Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
    as keys type, :class:`StructType` or :class:`ArrayType` with
    the specified schema. Returns `null`, in the case of an unparseable string.
    .. versionadded:: 2.1.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        string column in json format
    schema : :class:`DataType` or str
        a StructType or ArrayType of StructType to use when parsing the json column.
        .. versionchanged:: 2.3
            the DDL-formatted string is also supported for ``schema``.
    options : dict, optional
        options to control parsing. accepts the same options as the json datasource.
        See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
        in the version you use.
        .. # noqa
    Examples
    --------
    >>> from pyspark.sql.types import *
    >>> data = [(1, '''{"a": 1}''')]
    >>> schema = StructType([StructField("a", IntegerType())])
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=Row(a=1))]
    >>> df.select(from_json(df.value, "a INT").alias("json")).collect()
    [Row(json=Row(a=1))]
    >>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
    [Row(json={'a': 1})]
    >>> data = [(1, '''[{"a": 1}]''')]
    >>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=[Row(a=1)])]
    >>> schema = schema_of_json(lit('''{"a": 0}'''))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=Row(a=None))]
    >>> data = [(1, '''[1, 2, 3]''')]
    >>> schema = ArrayType(IntegerType())
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=[1, 2, 3])]
    """
    ctx = SparkContext._active_spark_context
    # Normalize the schema argument: DataType -> its JSON string form,
    # Column -> the underlying Java column, str (DDL) passes through as-is.
    if isinstance(schema, DataType):
        jschema = schema.json()
    elif isinstance(schema, Column):
        jschema = _to_java_column(schema)
    else:
        jschema = schema
    jc = ctx._jvm.functions.from_json(
        _to_java_column(col), jschema, _options_to_str(options))
    return Column(jc)
def to_json(col, options=None):
    """
    Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
    into a JSON string. Throws an exception, in the case of an unsupported type.
    .. versionadded:: 2.1.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column containing a struct, an array or a map.
    options : dict, optional
        options to control converting. accepts the same options as the JSON datasource.
        See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
        in the version you use.
        Additionally the function supports the `pretty` option which enables
        pretty JSON generation.
        .. # noqa
    Examples
    --------
    >>> from pyspark.sql import Row
    >>> from pyspark.sql.types import *
    >>> data = [(1, Row(age=2, name='Alice'))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json='{"age":2,"name":"Alice"}')]
    >>> data = [(1, [Row(age=2, name='Alice'), Row(age=3, name='Bob')])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json='[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
    >>> data = [(1, {"name": "Alice"})]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json='{"name":"Alice"}')]
    >>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json='[{"name":"Alice"},{"name":"Bob"}]')]
    >>> data = [(1, ["Alice", "Bob"])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json='["Alice","Bob"]')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.to_json(_to_java_column(col), _options_to_str(options)))
def schema_of_json(json, options=None):
    """
    Parses a JSON string and infers its schema in DDL format.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    json : :class:`~pyspark.sql.Column` or str
        a JSON string or a foldable string column containing a JSON string.
    options : dict, optional
        options to control parsing. accepts the same options as the JSON datasource.
        See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-json.html#data-source-option>`_
        in the version you use.
        .. # noqa
        .. versionchanged:: 3.0
           It accepts `options` parameter to control schema inferring.
    Examples
    --------
    >>> df = spark.range(1)
    >>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
    [Row(json='STRUCT<`a`: BIGINT>')]
    >>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
    >>> df.select(schema.alias("json")).collect()
    [Row(json='STRUCT<`a`: BIGINT>')]
    """
    # A plain string is turned into a literal column; anything else must
    # already be a Column.
    if isinstance(json, Column):
        jcol = _to_java_column(json)
    elif isinstance(json, str):
        jcol = _create_column_from_literal(json)
    else:
        raise TypeError("schema argument should be a column or string")
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.schema_of_json(jcol, _options_to_str(options)))
def schema_of_csv(csv, options=None):
    """
    Parses a CSV string and infers its schema in DDL format.
    .. versionadded:: 3.0.0
    Parameters
    ----------
    csv : :class:`~pyspark.sql.Column` or str
        a CSV string or a foldable string column containing a CSV string.
    options : dict, optional
        options to control parsing. accepts the same options as the CSV datasource.
        See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
        in the version you use.
        .. # noqa
    Examples
    --------
    >>> df = spark.range(1)
    >>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
    [Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')]
    >>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
    [Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')]
    """
    # A plain string is turned into a literal column; anything else must
    # already be a Column.
    if isinstance(csv, Column):
        jcol = _to_java_column(csv)
    elif isinstance(csv, str):
        jcol = _create_column_from_literal(csv)
    else:
        raise TypeError("schema argument should be a column or string")
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.schema_of_csv(jcol, _options_to_str(options)))
def to_csv(col, options=None):
    """
    Converts a column containing a :class:`StructType` into a CSV string.
    Throws an exception, in the case of an unsupported type.
    .. versionadded:: 3.0.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column containing a struct.
    options: dict, optional
        options to control converting. accepts the same options as the CSV datasource.
        See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
        in the version you use.
        .. # noqa
    Examples
    --------
    >>> from pyspark.sql import Row
    >>> data = [(1, Row(age=2, name='Alice'))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_csv(df.value).alias("csv")).collect()
    [Row(csv='2,Alice')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.to_csv(_to_java_column(col), _options_to_str(options)))
def size(col):
    """
    Collection function: returns the length of the array or map stored in the column.
    .. versionadded:: 1.5.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
    >>> df.select(size(df.data)).collect()
    [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.size(_to_java_column(col))
    return Column(jc)
def array_min(col):
    """
    Collection function: returns the minimum value of the array.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
    >>> df.select(array_min(df.data).alias('min')).collect()
    [Row(min=1), Row(min=-1)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_min(_to_java_column(col))
    return Column(jc)
def array_max(col):
    """
    Collection function: returns the maximum value of the array.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
    >>> df.select(array_max(df.data).alias('max')).collect()
    [Row(max=3), Row(max=10)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_max(_to_java_column(col))
    return Column(jc)
def sort_array(col, asc=True):
    """
    Collection function: sorts the input array in ascending or descending order according
    to the natural ordering of the array elements. Null elements will be placed at the beginning
    of the returned array in ascending order or at the end of the returned array in descending
    order.
    .. versionadded:: 1.5.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    asc : bool, optional
    Examples
    --------
    >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
    >>> df.select(sort_array(df.data).alias('r')).collect()
    [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
    >>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
    [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.sort_array(_to_java_column(col), asc)
    return Column(jc)
def array_sort(col):
    """
    Collection function: sorts the input array in ascending order. The elements of the input array
    must be orderable. Null elements will be placed at the end of the returned array.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
    >>> df.select(array_sort(df.data).alias('r')).collect()
    [Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_sort(_to_java_column(col))
    return Column(jc)
def shuffle(col):
    """
    Collection function: Generates a random permutation of the given array.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Notes
    -----
    The function is non-deterministic.
    Examples
    --------
    >>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
    >>> df.select(shuffle(df.data).alias('s')).collect()  # doctest: +SKIP
    [Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.shuffle(_to_java_column(col))
    return Column(jc)
def reverse(col):
    """
    Collection function: returns a reversed string or an array with reverse order of elements.
    .. versionadded:: 1.5.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
    >>> df.select(reverse(df.data).alias('s')).collect()
    [Row(s='LQS krapS')]
    >>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
    >>> df.select(reverse(df.data).alias('r')).collect()
    [Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.reverse(_to_java_column(col))
    return Column(jc)
def flatten(col):
    """
    Collection function: creates a single array from an array of arrays.
    If a structure of nested arrays is deeper than two levels,
    only one level of nesting is removed.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
    >>> df.select(flatten(df.data).alias('r')).collect()
    [Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.flatten(_to_java_column(col))
    return Column(jc)
def map_keys(col):
    """
    Collection function: Returns an unordered array containing the keys of the map.
    .. versionadded:: 2.3.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> from pyspark.sql.functions import map_keys
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_keys("data").alias("keys")).show()
    +------+
    |  keys|
    +------+
    |[1, 2]|
    +------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_keys(_to_java_column(col))
    return Column(jc)
def map_values(col):
    """
    Collection function: Returns an unordered array containing the values of the map.
    .. versionadded:: 2.3.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> from pyspark.sql.functions import map_values
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_values("data").alias("values")).show()
    +------+
    |values|
    +------+
    |[a, b]|
    +------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_values(_to_java_column(col))
    return Column(jc)
def map_entries(col):
    """
    Collection function: Returns an unordered array of all entries in the given map.
    .. versionadded:: 3.0.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> from pyspark.sql.functions import map_entries
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_entries("data").alias("entries")).show()
    +----------------+
    |         entries|
    +----------------+
    |[{1, a}, {2, b}]|
    +----------------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_entries(_to_java_column(col))
    return Column(jc)
def map_from_entries(col):
    """
    Collection function: Returns a map created from the given array of entries.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    Examples
    --------
    >>> from pyspark.sql.functions import map_from_entries
    >>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
    >>> df.select(map_from_entries("data").alias("map")).show()
    +----------------+
    |             map|
    +----------------+
    |{1 -> a, 2 -> b}|
    +----------------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_from_entries(_to_java_column(col))
    return Column(jc)
def array_repeat(col, count):
    """
    Collection function: creates an array containing a column repeated count times.
    .. versionadded:: 2.4.0
    Examples
    --------
    >>> df = spark.createDataFrame([('ab',)], ['data'])
    >>> df.select(array_repeat(df.data, 3).alias('r')).collect()
    [Row(r=['ab', 'ab', 'ab'])]
    """
    ctx = SparkContext._active_spark_context
    # count may be either a plain int or a Column; only the latter needs conversion.
    if isinstance(count, Column):
        jcount = _to_java_column(count)
    else:
        jcount = count
    return Column(ctx._jvm.functions.array_repeat(_to_java_column(col), jcount))
def arrays_zip(*cols):
    """
    Collection function: Returns a merged array of structs in which the N-th struct contains all
    N-th values of input arrays.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    cols : :class:`~pyspark.sql.Column` or str
        columns of arrays to be merged.
    Examples
    --------
    >>> from pyspark.sql.functions import arrays_zip
    >>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
    >>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
    [Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.arrays_zip(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
def map_concat(*cols):
    """Returns the union of all the given maps.
    .. versionadded:: 2.4.0
    Parameters
    ----------
    cols : :class:`~pyspark.sql.Column` or str
        column names or :class:`~pyspark.sql.Column`\\s
    Examples
    --------
    >>> from pyspark.sql.functions import map_concat
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c') as map2")
    >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
    +------------------------+
    |map3                    |
    +------------------------+
    |{1 -> a, 2 -> b, 3 -> c}|
    +------------------------+
    """
    ctx = SparkContext._active_spark_context
    # Also accept a single list/set of columns instead of varargs.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.map_concat(_to_seq(ctx, cols, _to_java_column)))
def sequence(start, stop, step=None):
    """
    Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
    If `step` is not set, incrementing by 1 if `start` is less than or equal to `stop`,
    otherwise -1.
    .. versionadded:: 2.4.0
    Examples
    --------
    >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
    >>> df1.select(sequence('C1', 'C2').alias('r')).collect()
    [Row(r=[-2, -1, 0, 1, 2])]
    >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
    >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
    [Row(r=[4, 2, 0, -2, -4])]
    """
    ctx = SparkContext._active_spark_context
    jstart = _to_java_column(start)
    jstop = _to_java_column(stop)
    # The JVM side infers the step direction when it is omitted.
    if step is None:
        jc = ctx._jvm.functions.sequence(jstart, jstop)
    else:
        jc = ctx._jvm.functions.sequence(jstart, jstop, _to_java_column(step))
    return Column(jc)
def from_csv(col, schema, options=None):
    """
    Parses a column containing a CSV string to a row with the specified schema.
    Returns `null`, in the case of an unparseable string.
    .. versionadded:: 3.0.0
    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        string column in CSV format
    schema : :class:`~pyspark.sql.Column` or str
        a string with schema in DDL format to use when parsing the CSV column.
    options : dict, optional
        options to control parsing. accepts the same options as the CSV datasource.
        See `Data Source Option <https://spark.apache.org/docs/latest/sql-data-sources-csv.html#data-source-option>`_
        in the version you use.
        .. # noqa
    Examples
    --------
    >>> data = [("1,2,3",)]
    >>> df = spark.createDataFrame(data, ("value",))
    >>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
    [Row(csv=Row(a=1, b=2, c=3))]
    >>> value = data[0][0]
    >>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
    [Row(csv=Row(_c0=1, _c1=2, _c2=3))]
    >>> data = [("   abc",)]
    >>> df = spark.createDataFrame(data, ("value",))
    >>> options = {'ignoreLeadingWhiteSpace': True}
    >>> df.select(from_csv(df.value, "s string", options).alias("csv")).collect()
    [Row(csv=Row(s='abc'))]
    """
    sc = SparkContext._active_spark_context
    # A DDL string is passed to the JVM as a literal column; a Column (e.g. the
    # result of schema_of_csv) is unwrapped to its Java counterpart.
    if isinstance(schema, str):
        schema = _create_column_from_literal(schema)
    elif isinstance(schema, Column):
        schema = _to_java_column(schema)
    else:
        raise TypeError("schema argument should be a column or string")
    jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, _options_to_str(options))
    return Column(jc)
def _unresolved_named_lambda_variable(*name_parts):
    """
    Create `o.a.s.sql.expressions.UnresolvedNamedLambdaVariable`,
    convert it to o.s.sql.Column and wrap in Python `Column`
    Parameters
    ----------
    name_parts : str
    """
    ctx = SparkContext._active_spark_context
    expressions = ctx._jvm.org.apache.spark.sql.catalyst.expressions
    jvar = expressions.UnresolvedNamedLambdaVariable(_to_seq(ctx, name_parts))
    return Column(ctx._jvm.Column(jvar))
def _get_lambda_parameters(f):
import inspect
signature = inspect.signature(f)
parameters = signature.parameters.values()
# We should exclude functions that use
# variable args and keyword argnames
# as well as keyword only args
supported_parameter_types = {
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.POSITIONAL_ONLY,
}
# Validate that
# function arity is between 1 and 3
if not (1 <= len(parameters) <= 3):
raise ValueError(
"f should take between 1 and 3 arguments, but provided function takes {}".format(
len(parameters)
)
)
# and all arguments can be used as positional
if not all(p.kind in supported_parameter_types for p in parameters):
raise ValueError(
"f should use only POSITIONAL or POSITIONAL OR KEYWORD arguments"
)
return parameters
def _create_lambda(f):
    """
    Create `o.a.s.sql.expressions.LambdaFunction` corresponding
    to transformation described by f
    :param f: A Python of one of the following forms:
            - (Column) -> Column: ...
            - (Column, Column) -> Column: ...
            - (Column, Column, Column) -> Column: ...
    """
    parameters = _get_lambda_parameters(f)
    ctx = SparkContext._active_spark_context
    expressions = ctx._jvm.org.apache.spark.sql.catalyst.expressions
    # One fresh JVM lambda variable per Python parameter, named x/y/z.
    args = [
        _unresolved_named_lambda_variable(
            expressions.UnresolvedNamedLambdaVariable.freshVarName(name)
        )
        for name in ["x", "y", "z"][: len(parameters)]
    ]
    result = f(*args)
    if not isinstance(result, Column):
        raise ValueError("f should return Column, got {}".format(type(result)))
    jexpr = result._jc.expr()
    jargs = _to_seq(ctx, [arg._jc.expr() for arg in args])
    return expressions.LambdaFunction(jexpr, jargs, False)
def _invoke_higher_order_function(name, cols, funs):
    """
    Invokes expression identified by name,
    (relative to ```org.apache.spark.sql.catalyst.expressions``)
    and wraps the result with Column (first Scala one, then Python).
    :param name: Name of the expression
    :param cols: a list of columns
    :param funs: a list of((*Column) -> Column functions.
    :return: a Column
    """
    ctx = SparkContext._active_spark_context
    expressions = ctx._jvm.org.apache.spark.sql.catalyst.expressions
    expr_cls = getattr(expressions, name)
    jcols = [_to_java_column(c).expr() for c in cols]
    jfuns = [_create_lambda(f) for f in funs]
    return Column(ctx._jvm.Column(expr_cls(*(jcols + jfuns))))
def transform(col, f):
    """
    Returns an array of elements after applying a transformation to each element
    in the input array.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    f : function
        a function applied to each element of the input array; either unary
        ``(x: Column) -> Column: ...`` or binary
        ``(x: Column, i: Column) -> Column: ...`` where the second argument is
        a 0-based index of the element. It can use methods of
        :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame([(1, [1, 2, 3, 4])], ("key", "values"))
    >>> df.select(transform("values", lambda x: x * 2).alias("doubled")).show()
    +------------+
    |     doubled|
    +------------+
    |[2, 4, 6, 8]|
    +------------+
    """
    return _invoke_higher_order_function(
        "ArrayTransform",
        [col],
        [f],
    )
def exists(col, f):
    """
    Returns whether a predicate holds for one or more elements in the array.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    f : function
        ``(x: Column) -> Column: ...``  returning the Boolean expression.
        Can use methods of :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame([(1, [1, 2, 3, 4]), (2, [3, -1, 0])],("key", "values"))
    >>> df.select(exists("values", lambda x: x < 0).alias("any_negative")).show()
    +------------+
    |any_negative|
    +------------+
    |       false|
    +------------+
    |        true|
    +------------+
    """
    # Note: previously documented with an reST-style ":return:" field;
    # converted to a numpydoc "Returns" section for consistency with the
    # sibling higher-order functions in this module.
    return _invoke_higher_order_function("ArrayExists", [col], [f])
def forall(col, f):
    """
    Returns whether a predicate holds for every element in the array.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    f : function
        ``(x: Column) -> Column: ...`` returning the Boolean expression.
        Can use methods of :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame(
    ...     [(1, ["bar"]), (2, ["foo", "bar"]), (3, ["foobar", "foo"])],
    ...     ("key", "values")
    ... )
    >>> df.select(forall("values", lambda x: x.rlike("foo")).alias("all_foo")).show()
    +-------+
    |all_foo|
    +-------+
    |  false|
    |  false|
    |   true|
    +-------+
    """
    return _invoke_higher_order_function(
        "ArrayForAll",
        [col],
        [f],
    )
def filter(col, f):
    """
    Returns an array of elements for which a predicate holds in a given array.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    f : function
        a function returning the Boolean expression; either unary
        ``(x: Column) -> Column: ...`` or binary
        ``(x: Column, i: Column) -> Column: ...`` where the second argument is
        a 0-based index of the element. It can use methods of
        :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame(
    ...     [(1, ["2018-09-20",  "2019-02-03", "2019-07-01", "2020-06-01"])],
    ...     ("key", "values")
    ... )
    >>> def after_second_quarter(x):
    ...     return month(to_date(x)) > 6
    >>> df.select(
    ...     filter("values", after_second_quarter).alias("after_second_quarter")
    ... ).show(truncate=False)
    +------------------------+
    |after_second_quarter    |
    +------------------------+
    |[2018-09-20, 2019-07-01]|
    +------------------------+
    """
    return _invoke_higher_order_function(
        "ArrayFilter",
        [col],
        [f],
    )
def aggregate(col, initialValue, merge, finish=None):
    """
    Applies a binary operator to an initial state and all elements in the array,
    and reduces this to a single state. The final state is converted into the
    final result by applying a finish function.

    Both functions can use methods of :class:`~pyspark.sql.Column`, functions
    defined in :py:mod:`pyspark.sql.functions` and Scala
    ``UserDefinedFunctions``. Python ``UserDefinedFunctions`` are not supported
    (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    initialValue : :class:`~pyspark.sql.Column` or str
        initial value. Name of column or expression
    merge : function
        a binary function ``(acc: Column, x: Column) -> Column...`` returning
        expression of the same type as ``zero``
    finish : function
        an optional unary function ``(x: Column) -> Column: ...``
        used to convert the accumulated value.

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame([(1, [20.0, 4.0, 2.0, 6.0, 10.0])], ("id", "values"))
    >>> df.select(aggregate("values", lit(0.0), lambda acc, x: acc + x).alias("sum")).show()
    +----+
    | sum|
    +----+
    |42.0|
    +----+
    """
    # The JVM expression takes an optional trailing "finish" lambda; pass it
    # only when the caller supplied one.
    functions = [merge] if finish is None else [merge, finish]
    return _invoke_higher_order_function(
        "ArrayAggregate",
        [col, initialValue],
        functions,
    )
def zip_with(left, right, f):
    """
    Merge two given arrays, element-wise, into a single array using a function.
    If one array is shorter, nulls are appended at the end to match the length
    of the longer array, before applying the function.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    left : :class:`~pyspark.sql.Column` or str
        name of the first column or expression
    right : :class:`~pyspark.sql.Column` or str
        name of the second column or expression
    f : function
        a binary function ``(x1: Column, x2: Column) -> Column...``
        Can use methods of :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame([(1, [1, 3, 5, 8], [0, 2, 4, 6])], ("id", "xs", "ys"))
    >>> df.select(zip_with("xs", "ys", lambda x, y: x ** y).alias("powers")).show(truncate=False)
    +---------------------------+
    |powers                     |
    +---------------------------+
    |[1.0, 9.0, 625.0, 262144.0]|
    +---------------------------+
    """
    return _invoke_higher_order_function(
        "ZipWith",
        [left, right],
        [f],
    )
def transform_keys(col, f):
    """
    Applies a function to every key-value pair in a map and returns
    a map with the results of those applications as the new keys for the pairs.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    f : function
        a binary function ``(k: Column, v: Column) -> Column...``
        Can use methods of :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame([(1, {"foo": -2.0, "bar": 2.0})], ("id", "data"))
    >>> df.select(transform_keys(
    ...     "data", lambda k, _: upper(k)).alias("data_upper")
    ... ).show(truncate=False)
    +-------------------------+
    |data_upper               |
    +-------------------------+
    |{BAR -> 2.0, FOO -> -2.0}|
    +-------------------------+
    """
    return _invoke_higher_order_function(
        "TransformKeys",
        [col],
        [f],
    )
def transform_values(col, f):
    """
    Applies a function to every key-value pair in a map and returns
    a map with the results of those applications as the new values for the pairs.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    f : function
        a binary function ``(k: Column, v: Column) -> Column...``
        Can use methods of :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame([(1, {"IT": 10.0, "SALES": 2.0, "OPS": 24.0})], ("id", "data"))
    >>> df.select(transform_values(
    ...     "data", lambda k, v: when(k.isin("IT", "OPS"), v + 10.0).otherwise(v)
    ... ).alias("new_data")).show(truncate=False)
    +---------------------------------------+
    |new_data                               |
    +---------------------------------------+
    |{OPS -> 34.0, IT -> 20.0, SALES -> 2.0}|
    +---------------------------------------+
    """
    return _invoke_higher_order_function(
        "TransformValues",
        [col],
        [f],
    )
def map_filter(col, f):
    """
    Returns a map whose key-value pairs satisfy a predicate.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col : :class:`~pyspark.sql.Column` or str
        name of column or expression
    f : function
        a binary function ``(k: Column, v: Column) -> Column...``
        Can use methods of :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame([(1, {"foo": 42.0, "bar": 1.0, "baz": 32.0})], ("id", "data"))
    >>> df.select(map_filter(
    ...     "data", lambda _, v: v > 30.0).alias("data_filtered")
    ... ).show(truncate=False)
    +--------------------------+
    |data_filtered             |
    +--------------------------+
    |{baz -> 32.0, foo -> 42.0}|
    +--------------------------+
    """
    return _invoke_higher_order_function(
        "MapFilter",
        [col],
        [f],
    )
def map_zip_with(col1, col2, f):
    """
    Merge two given maps, key-wise into a single map using a function.

    .. versionadded:: 3.1.0

    Parameters
    ----------
    col1 : :class:`~pyspark.sql.Column` or str
        name of the first column or expression
    col2 : :class:`~pyspark.sql.Column` or str
        name of the second column or expression
    f : function
        a ternary function ``(k: Column, v1: Column, v2: Column) -> Column...``
        Can use methods of :class:`~pyspark.sql.Column`, functions defined in
        :py:mod:`pyspark.sql.functions` and Scala ``UserDefinedFunctions``.
        Python ``UserDefinedFunctions`` are not supported
        (`SPARK-27052 <https://issues.apache.org/jira/browse/SPARK-27052>`__).

    Returns
    -------
    :class:`~pyspark.sql.Column`

    Examples
    --------
    >>> df = spark.createDataFrame([
    ...     (1, {"IT": 24.0, "SALES": 12.00}, {"IT": 2.0, "SALES": 1.4})],
    ...     ("id", "base", "ratio")
    ... )
    >>> df.select(map_zip_with(
    ...     "base", "ratio", lambda k, v1, v2: round(v1 * v2, 2)).alias("updated_data")
    ... ).show(truncate=False)
    +---------------------------+
    |updated_data               |
    +---------------------------+
    |{SALES -> 16.8, IT -> 48.0}|
    +---------------------------+
    """
    return _invoke_higher_order_function(
        "MapZipWith",
        [col1, col2],
        [f],
    )
# ---------------------- Partition transform functions --------------------------------
def years(col):
    """
    Partition transform function: A transform for timestamps and dates
    to partition data into years.

    .. versionadded:: 3.1.0

    Examples
    --------
    >>> df.writeTo("catalog.db.table").partitionedBy(  # doctest: +SKIP
    ...     years("ts")
    ... ).createOrReplace()

    Notes
    -----
    This function can be used only in combination with
    :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
    method of the `DataFrameWriterV2`.
    """
    jcol = _to_java_column(col)
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.years(jcol))
def months(col):
    """
    Partition transform function: A transform for timestamps and dates
    to partition data into months.

    .. versionadded:: 3.1.0

    Examples
    --------
    >>> df.writeTo("catalog.db.table").partitionedBy(
    ...     months("ts")
    ... ).createOrReplace()  # doctest: +SKIP

    Notes
    -----
    This function can be used only in combination with
    :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
    method of the `DataFrameWriterV2`.
    """
    jcol = _to_java_column(col)
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.months(jcol))
def days(col):
    """
    Partition transform function: A transform for timestamps and dates
    to partition data into days.

    .. versionadded:: 3.1.0

    Examples
    --------
    >>> df.writeTo("catalog.db.table").partitionedBy(  # doctest: +SKIP
    ...     days("ts")
    ... ).createOrReplace()

    Notes
    -----
    This function can be used only in combination with
    :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
    method of the `DataFrameWriterV2`.
    """
    jcol = _to_java_column(col)
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.days(jcol))
def hours(col):
    """
    Partition transform function: A transform for timestamps
    to partition data into hours.

    .. versionadded:: 3.1.0

    Examples
    --------
    >>> df.writeTo("catalog.db.table").partitionedBy(  # doctest: +SKIP
    ...     hours("ts")
    ... ).createOrReplace()

    Notes
    -----
    This function can be used only in combination with
    :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
    method of the `DataFrameWriterV2`.
    """
    jcol = _to_java_column(col)
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.hours(jcol))
def bucket(numBuckets, col):
    """
    Partition transform function: A transform for any type that partitions
    by a hash of the input column.

    .. versionadded:: 3.1.0

    Examples
    --------
    >>> df.writeTo("catalog.db.table").partitionedBy(  # doctest: +SKIP
    ...     bucket(42, "ts")
    ... ).createOrReplace()

    Notes
    -----
    This function can be used only in combination with
    :py:meth:`~pyspark.sql.readwriter.DataFrameWriterV2.partitionedBy`
    method of the `DataFrameWriterV2`.
    """
    if not isinstance(numBuckets, (int, Column)):
        raise TypeError(
            "numBuckets should be a Column or an int, got {}".format(type(numBuckets))
        )

    sc = SparkContext._active_spark_context
    # A plain int must be wrapped in a literal expression for the JVM call.
    if isinstance(numBuckets, int):
        j_num_buckets = _create_column_from_literal(numBuckets)
    else:
        j_num_buckets = _to_java_column(numBuckets)
    return Column(sc._jvm.functions.bucket(j_num_buckets, _to_java_column(col)))
# ---------------------------- User Defined Function ----------------------------------
def udf(f=None, returnType=StringType()):
    """Creates a user defined function (UDF).

    .. versionadded:: 1.3.0

    Parameters
    ----------
    f : function
        python function if used as a standalone function
    returnType : :class:`pyspark.sql.types.DataType` or str
        the return type of the user-defined function. The value can be either a
        :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.

    Examples
    --------
    >>> from pyspark.sql.types import IntegerType
    >>> slen = udf(lambda s: len(s), IntegerType())
    >>> @udf
    ... def to_upper(s):
    ...     if s is not None:
    ...         return s.upper()
    ...
    >>> @udf(returnType=IntegerType())
    ... def add_one(x):
    ...     if x is not None:
    ...         return x + 1
    ...
    >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
    >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
    +----------+--------------+------------+
    |slen(name)|to_upper(name)|add_one(age)|
    +----------+--------------+------------+
    |         8|      JOHN DOE|          22|
    +----------+--------------+------------+

    Notes
    -----
    The user-defined functions are considered deterministic by default. Due to
    optimization, duplicate invocations may be eliminated or the function may even be invoked
    more times than it is present in the query. If your function is not deterministic, call
    `asNondeterministic` on the user defined function. E.g.:

    >>> from pyspark.sql.types import IntegerType
    >>> import random
    >>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()

    The user-defined functions do not support conditional expressions or short circuiting
    in boolean expressions and it ends up with being executed all internally. If the functions
    can fail on special rows, the workaround is to incorporate the condition into the functions.

    The user-defined functions do not take keyword arguments on the calling side.
    """
    # The Python-value <-> SQL-type conversion rules applied inside normal
    # UDFs are not fully documented to users yet, and some behaviors are buggy
    # and may change. See the table generated in SPARK-28131's PR for the full
    # matrix of conversions (e.g. how bool/int/str/date/list/dict/Row values
    # map onto each SQL type, and which combinations raise).
    if f is None or isinstance(f, (str, DataType)):
        # Decorator usage: @udf, @udf(), or @udf(dataType) / @udf(returnType=...)
        # where a DataType/DDL string passed positionally is the return type.
        return_type = f or returnType
        return functools.partial(
            _create_udf, returnType=return_type,
            evalType=PythonEvalType.SQL_BATCHED_UDF)
    # Plain-call usage: udf(f, returnType)
    return _create_udf(
        f=f, returnType=returnType, evalType=PythonEvalType.SQL_BATCHED_UDF)
def _test():
    """Run this module's doctests against a local SparkSession."""
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.functions

    globs = pyspark.sql.functions.__dict__.copy()
    spark = (
        SparkSession.builder
        .master("local[4]")
        .appName("sql.functions tests")
        .getOrCreate()
    )
    # Names the doctest examples expect to find in their namespace.
    globs['sc'] = spark.sparkContext
    globs['spark'] = spark
    globs['df'] = spark.createDataFrame(
        [Row(age=2, name='Alice'), Row(age=5, name='Bob')])
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    _test()
| 32.403427 | 309 | 0.600743 |
acde81a3b915fbb24a4f91b100eeeead292899e3 | 6,208 | py | Python | straxen/itp_map.py | jhowl01/straxen | 60f414b910620bdb3a281177cd8a761927e8d50a | [
"BSD-3-Clause"
] | null | null | null | straxen/itp_map.py | jhowl01/straxen | 60f414b910620bdb3a281177cd8a761927e8d50a | [
"BSD-3-Clause"
] | null | null | null | straxen/itp_map.py | jhowl01/straxen | 60f414b910620bdb3a281177cd8a761927e8d50a | [
"BSD-3-Clause"
] | null | null | null | import logging
import gzip
import json
import re
import numpy as np
from scipy.spatial import cKDTree
import strax
export, __all__ = strax.exporter()
@export
class InterpolateAndExtrapolate:
"""Linearly interpolate- and extrapolate using inverse-distance
weighted averaging between nearby points.
"""
def __init__(self, points, values, neighbours_to_use=None, array_valued=False):
"""
:param points: array (n_points, n_dims) of coordinates
:param values: array (n_points) of values
:param neighbours_to_use: Number of neighbouring points to use for
averaging. Default is 2 * dimensions of points.
"""
self.kdtree = cKDTree(points)
self.values = values
if neighbours_to_use is None:
neighbours_to_use = points.shape[1] * 2
self.neighbours_to_use = neighbours_to_use
self.array_valued = array_valued
if array_valued:
self.n_dim = self.values.shape[-1]
def __call__(self, points):
distances, indices = self.kdtree.query(points, self.neighbours_to_use)
result = np.ones(len(points)) * float('nan')
if self.array_valued:
result = np.repeat(result.reshape(-1, 1), self.n_dim, axis=1)
# If one of the coordinates is NaN, the neighbour-query fails.
# If we don't filter these out, it would result in an IndexError
# as the kdtree returns an invalid index if it can't find neighbours.
valid = (distances < float('inf')).max(axis=-1)
values = self.values[indices[valid]]
weights = 1 / np.clip(distances[valid], 1e-6, float('inf'))
if self.array_valued:
weights = np.repeat(weights, self.n_dim).reshape(values.shape)
result[valid] = np.average(values, weights=weights,
axis=-2 if self.array_valued else -1)
return result
@export
class InterpolatingMap:
    """Correction map that computes values using inverse-weighted distance
    interpolation.

    The map must be specified as a json translating to a dictionary like this:
        'coordinate_system' : [[x1, y1], [x2, y2], [x3, y3], [x4, y4], ...],
        'map' : [value1, value2, value3, value4, ...]
        'another_map' : idem
        'name': 'Nice file with maps',
        'description': 'Say what the maps are, who you are, etc',
        'timestamp': unix epoch seconds timestamp
    with the straightforward generalization to 1d and 3d.

    Alternatively, a grid coordinate system can be specified as follows:
        'coordinate_system' : [['x', [x_min, x_max, n_x]], [['y', [y_min, y_max, n_y]]

    Alternatively, an N-vector-valued map can be specified by an array with
    last dimension N in 'map'.

    The default map name is 'map', I'd recommend you use that.

    For a 0d placeholder map, use
        'points': [],
        'map': 42,
        etc
    """
    # Metadata keys; every other key is treated as a map payload.
    data_field_names = ['timestamp', 'description', 'coordinate_system',
                        'name', 'irregular']

    def __init__(self, data):
        """Parse *data* and build one interpolator per map it contains.

        :param data: gzipped JSON bytes, a JSON string, or an already-parsed dict
        """
        if isinstance(data, bytes):
            data = gzip.decompress(data).decode()
        if isinstance(data, (str, bytes)):
            data = json.loads(data)
        assert isinstance(data, dict), f"Expected dictionary data, got {type(data)}"
        self.data = data

        # Decompress / dequantize the map
        # TODO: support multiple map names
        if 'compressed' in self.data:
            compressor, dtype, shape = self.data['compressed']
            self.data['map'] = np.frombuffer(
                strax.io.COMPRESSORS[compressor]['decompress'](self.data['map']),
                dtype=dtype).reshape(*shape)
            del self.data['compressed']
        if 'quantized' in self.data:
            self.data['map'] = self.data['quantized'] * self.data['map'].astype(np.float32)
            del self.data['quantized']

        cs = self.data['coordinate_system']
        if not len(cs):
            self.dimensions = 0
        elif isinstance(cs[0], list) and isinstance(cs[0][0], str):
            # Support for specifying coordinate system as a gridspec
            grid = [np.linspace(left, right, points)
                    for _, (left, right, points) in cs]
            cs = np.array(np.meshgrid(*grid))
            cs = np.transpose(cs, np.roll(np.arange(len(grid) + 1), -1))
            cs = np.array(cs).reshape((-1, len(grid)))
            self.dimensions = len(grid)
        else:
            self.dimensions = len(cs[0])
        self.coordinate_system = cs

        self.interpolators = {}
        self.map_names = sorted([k for k in self.data.keys()
                                 if k not in self.data_field_names])

        log = logging.getLogger('InterpolatingMap')
        log.debug('Map name: %s' % self.data['name'])
        log.debug('Map description:\n    ' +
                  re.sub(r'\n', r'\n    ', self.data['description']))
        log.debug("Map names found: %s" % self.map_names)

        for map_name in self.map_names:
            map_data = np.array(self.data[map_name])
            array_valued = len(map_data.shape) == self.dimensions + 1
            if self.dimensions == 0:
                # 0 D -- placeholder maps which take no arguments
                # and always return a single value.
                # map_data is bound as a default argument so each map keeps
                # its own constant (avoids the late-binding closure pitfall
                # when several 0d maps are present).
                def itp_fun(positions, _map_data=map_data):
                    return _map_data * np.ones_like(positions)
            else:
                # Bug fix: this branch must not run for 0d maps. Previously
                # the placeholder above was unconditionally overwritten by an
                # InterpolateAndExtrapolate built from an empty coordinate
                # system, which breaks 0d maps.
                if array_valued:
                    map_data = map_data.reshape((-1, map_data.shape[-1]))
                itp_fun = InterpolateAndExtrapolate(points=np.array(cs),
                                                    values=np.array(map_data),
                                                    array_valued=array_valued)
            self.interpolators[map_name] = itp_fun

    def __call__(self, positions, map_name='map'):
        """Returns the value of the map at the position given by coordinates

        :param positions: array (n_dim) or (n_points, n_dim) of positions
        :param map_name: Name of the map to use. Default is 'map'.
        """
        return self.interpolators[map_name](positions)
| 39.794872 | 91 | 0.588434 |
acde81c90b75c73c22a880177927cbedb246d545 | 93 | py | Python | measurements/__main__.py | cyBerta/api | af8e15a800ffc272799a269f4801cbdd174d0c43 | [
"BSD-3-Clause"
] | null | null | null | measurements/__main__.py | cyBerta/api | af8e15a800ffc272799a269f4801cbdd174d0c43 | [
"BSD-3-Clause"
] | null | null | null | measurements/__main__.py | cyBerta/api | af8e15a800ffc272799a269f4801cbdd174d0c43 | [
"BSD-3-Clause"
] | null | null | null | import sys
from measurements.cli import cli
if __name__ == "__main__":
sys.exit(cli())
| 13.285714 | 32 | 0.698925 |
acde828341e57439681798b25a0244f1823cdc83 | 2,674 | py | Python | text/cleaners.py | Ella77/tacotron2 | cdf30d0c175d0a1d5c1b2131a8e922ed7ce93445 | [
"BSD-3-Clause"
] | 36 | 2018-12-20T08:58:10.000Z | 2022-03-30T04:31:01.000Z | text/cleaners.py | Ella77/tacotron2 | cdf30d0c175d0a1d5c1b2131a8e922ed7ce93445 | [
"BSD-3-Clause"
] | null | null | null | text/cleaners.py | Ella77/tacotron2 | cdf30d0c175d0a1d5c1b2131a8e922ed7ce93445 | [
"BSD-3-Clause"
] | 6 | 2019-07-29T04:59:33.000Z | 2021-08-24T08:20:16.000Z | """ from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from text.numbers import normalize_numbers
from text.korean import tokenize as ko_tokenize
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
# Each pattern matches the abbreviation as a whole word followed by a literal
# period, case-insensitively (e.g. "Dr." -> "doctor").
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
  ('mrs', 'misess'),
  ('mr', 'mister'),
  ('dr', 'doctor'),
  ('st', 'saint'),
  ('co', 'company'),
  ('jr', 'junior'),
  ('maj', 'major'),
  ('gen', 'general'),
  ('drs', 'doctors'),
  ('rev', 'reverend'),
  ('lt', 'lieutenant'),
  ('hon', 'honorable'),
  ('sgt', 'sergeant'),
  ('capt', 'captain'),
  ('esq', 'esquire'),
  ('ltd', 'limited'),
  ('col', 'colonel'),
  ('ft', 'fort'),
]]
def expand_abbreviations(text):
    """Replace known abbreviations (e.g. "mr.") with their full words."""
    for pattern, full_word in _abbreviations:
        text = re.sub(pattern, full_word, text)
    return text
def expand_numbers(text):
    """Spell out numbers in *text* using text.numbers.normalize_numbers."""
    return normalize_numbers(text)
def lowercase(text):
    """Return *text* with all cased characters converted to lower case."""
    return text.lower()
def collapse_whitespace(text):
    """Collapse every run of whitespace in *text* into a single space."""
    return _whitespace_re.sub(' ', text)
def convert_to_ascii(text):
    """Transliterate *text* to its closest ASCII representation via Unidecode."""
    return unidecode(text)
def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    return collapse_whitespace(lowercase(text))
def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    return collapse_whitespace(lowercase(convert_to_ascii(text)))
def english_cleaners(text):
    '''Pipeline for English text, including number and abbreviation expansion.'''
    # Apply the cleaning steps in order; each takes and returns a string.
    for step in (convert_to_ascii, lowercase, expand_numbers,
                 expand_abbreviations, collapse_whitespace):
        text = step(text)
    return text
def korean_cleaners(text):
    '''Pipeline for Korean text: tokenization via the Korean tokenizer.

    Unlike english_cleaners, no number or abbreviation expansion happens
    here; ko_tokenize is responsible for all Korean-specific normalization.
    '''
    return ko_tokenize(text, as_id=False)
acde83f1e89313ed5d465cb1b6da92d21caa47e1 | 1,200 | py | Python | tumor-prediction.py | mlatmd/intro-to-ml | 515e8fc8955b7d5289064c4d7b9e21d3e0a16351 | [
"MIT"
] | 2 | 2017-10-26T23:12:46.000Z | 2017-10-27T02:43:48.000Z | tumor-prediction.py | mlatmd/intro-to-ml | 515e8fc8955b7d5289064c4d7b9e21d3e0a16351 | [
"MIT"
] | null | null | null | tumor-prediction.py | mlatmd/intro-to-ml | 515e8fc8955b7d5289064c4d7b9e21d3e0a16351 | [
"MIT"
] | 1 | 2018-03-13T04:20:32.000Z | 2018-03-13T04:20:32.000Z | import pandas
import numpy
from sklearn import ensemble
from sklearn.model_selection import train_test_split
# Read in the breast cancer data.csv
data = pandas.read_csv("data.csv", header=0)
# Take a look at pandas dataframe format
# print(data.head())
# Data cleaning
mapping = {'M' : 0, 'B' : 1}
data['diagnosis'] = data['diagnosis'].map(mapping)
features = list(data.columns[1:31]) # Appending all the columns in feature vector
train_features, test_features, train_labels, test_labels = train_test_split(data[features], data['diagnosis'].values, test_size=0.20, random_state=10)
# Get the random forest classifier from the scikit library we imported
classifier = ensemble.RandomForestClassifier()
# Train your classifier with our training data split
trained_classifier = classifier.fit(train_features.values, train_labels)
# Let's try out our trained classifier
y_prediction = trained_classifier.predict(test_features.values)
# Print out the predictions vs the actual values
print(y_prediction)
print(test_labels)
num_correct_predictions = numpy.sum(y_prediction == test_labels)
num_test_samples = float(len(test_labels))
print ("ML Accuracy", num_correct_predictions / num_test_samples) | 34.285714 | 150 | 0.7925 |
acde83f2e527fed5c9cceab4359bb737bff12142 | 16,277 | py | Python | lib_core/datamart_core/common.py | VIDA-NYU/auctus | 49e83f9f61f3e35422b8c5fd40b9d5376b9f92a2 | [
"Apache-2.0"
] | 15 | 2020-12-22T15:28:56.000Z | 2022-03-29T08:13:49.000Z | lib_core/datamart_core/common.py | VIDA-NYU/auctus | 49e83f9f61f3e35422b8c5fd40b9d5376b9f92a2 | [
"Apache-2.0"
] | 4 | 2021-04-09T18:08:36.000Z | 2022-03-21T17:51:05.000Z | lib_core/datamart_core/common.py | VIDA-NYU/auctus | 49e83f9f61f3e35422b8c5fd40b9d5376b9f92a2 | [
"Apache-2.0"
] | 4 | 2021-08-12T09:34:29.000Z | 2022-01-03T22:30:50.000Z | import aio_pika
import asyncio
import elasticsearch
import elasticsearch.helpers
import functools
import hashlib
import html.entities
import json
import logging
import os
import re
import sentry_sdk
import sys
import threading
from . import types
logger = logging.getLogger(__name__)
class ThreadFormatter(logging.Formatter):
"""Variant of `Formatter` that shows the thread if it's not the main one.
"""
_main_thread = threading.main_thread().ident
def __init__(self, fmt=None, datefmt=None, threadedfmt=None):
"""Use ``%(threaded)s`` in ``fmt`` for the thread info, if not main.
``%(threaded)s`` expands to an empty string on the main thread, or to
the thread ID otherwise.
You can control the format of the ``threaded`` string by passing a
`threadedfmt` argument, which defaults to ``' %(thread)d'``.
Example usage::
handler.formatter = ThreadFormatter(
fmt="%(levelname)s %(name)s%(threaded)s: %(message)s",
threadedfmt=" thread=%(thread)d",
)
"""
super(ThreadFormatter, self).__init__(fmt, datefmt)
self._threadedfmt = threadedfmt or ' %(thread)d'
def formatMessage(self, record):
if record.thread != self._main_thread:
record.threaded = self._threadedfmt % dict(
thread=record.thread,
threadname=record.threadName,
)
else:
record.threaded = ''
return super(ThreadFormatter, self).formatMessage(record)
class JsonFormatter(logging.Formatter):
_main_thread = threading.main_thread().ident
def __init__(self):
super(JsonFormatter, self).__init__(
fmt='%(message)s',
style='%',
)
self._encoder = json.JSONEncoder(default=repr)
def format(self, record):
record.message = record.getMessage()
dct = {
'severity': record.levelname,
'message': record.message,
'messageFmt': record.msg,
'args': record.args,
'name': record.name,
}
if record.thread != self._main_thread:
dct['thread'] = record.thread
dct['threadname'] = record.threadName
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
dct['exception'] = record.exc_text
if record.stack_info:
dct['stack'] = self.formatStack(record.stack_info)
return self._encoder.encode(dct)
def setup_logging(clear=True, thread=True):
log_json = os.environ.get('LOG_FORMAT', 'text') == 'json'
if clear:
logging.root.handlers.clear()
if log_json:
formatter = JsonFormatter()
elif thread:
formatter = ThreadFormatter(
"%(asctime)s %(levelname)s %(name)s%(threaded)s: %(message)s",
threadedfmt=" thread=%(thread)d",
)
else:
formatter = logging.Formatter(
"%(asctime)s %(levelname)s %(name)s: %(message)s",
style='%',
)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logging.basicConfig(level=logging.INFO, handlers=[stream_handler])
def filter_delete(record):
if (
len(record.args) >= 2 and
record.args[0] == 'DELETE' and record.args[1].startswith('http')
):
return False
if (
len(record.args) >= 3 and
record.args[0] == 'GET' and record.args[1].startswith('http') and
record.args[2] == 404
):
return False
return True
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
logging.getLogger('elasticsearch').addFilter(filter_delete)
def filter_sodapy_throttle(record):
if record.msg == (
"Requests made without an app_token will be subject to strict "
+ "throttling limits."
):
return False
return True
logging.root.addFilter(filter_sodapy_throttle)
# Enable Sentry
if os.environ.get('SENTRY_DSN'):
from sentry_sdk.integrations.tornado import TornadoIntegration
logger.info("Initializing Sentry")
sentry_sdk.init(
dsn=os.environ['SENTRY_DSN'],
integrations=[TornadoIntegration()],
ignore_errors=[KeyboardInterrupt],
release='auctus@%s' % os.environ['DATAMART_VERSION'],
)
def block_wait_future(future):
"""Block the current thread until the future is done, return result.
This is like ``await`` but for threads. Do not call this on the event-loop
thread.
"""
event = threading.Event()
future.add_done_callback(lambda *a, **kw: event.set())
event.wait()
return future.result()
def block_run(loop, coro):
"""Block the current thread until the coroutine is done, return result.
The coroutine should not have been submitted to asyncio yet. Do not call
this on the event-loop thread.
"""
future = asyncio.run_coroutine_threadsafe(coro, loop)
return block_wait_future(future)
def json2msg(obj, **kwargs):
return aio_pika.Message(json.dumps(obj).encode('utf-8'), **kwargs)
def msg2json(msg):
return json.loads(msg.body.decode('utf-8'))
_log_future_references = {}
def log_future(future, logger, message="Exception in background task",
should_never_exit=False):
ident = id(future)
def log(future):
_log_future_references.pop(ident, None)
try:
future.result()
except Exception as e:
sentry_sdk.capture_exception(e)
logger.exception(message)
if should_never_exit:
logger.critical("Critical task died, exiting")
asyncio.get_event_loop().stop()
sys.exit(1)
future.add_done_callback(log)
# Keep a strong reference to the future
# https://bugs.python.org/issue21163
_log_future_references[ident] = future
class PrefixedElasticsearch(object):
def __init__(self):
self.es = elasticsearch.Elasticsearch(
os.environ['ELASTICSEARCH_HOSTS'].split(',')
)
self.prefix = os.environ['ELASTICSEARCH_PREFIX']
def add_prefix(self, index):
return ','.join(self.prefix + idx for idx in index.split(','))
def get(self, index, id, _source=None):
return self.es.get(self.add_prefix(index), id, _source=_source)
def index(self, index, body, id=None):
return self.es.index(index=self.add_prefix(index), body=body, id=id)
def search(self, body=None, index=None,
size=None, from_=None, request_timeout=None):
return self.es.search(
index=self.add_prefix(index),
body=body, size=size, from_=from_, request_timeout=request_timeout,
)
def delete(self, index, id):
return self.es.delete(self.add_prefix(index), id)
def delete_by_query(self, index, body):
return self.es.delete_by_query(index=self.add_prefix(index), body=body)
def index_exists(self, index):
return self.es.indices.exists(self.add_prefix(index))
def index_create(self, index, body=None):
return self.es.indices.create(self.add_prefix(index), body=body)
def scan(self, index, query, **kwargs):
return elasticsearch.helpers.scan(
self.es,
index=self.add_prefix(index),
query=query,
request_timeout=60,
**kwargs,
)
def close(self):
self.es.close()
re_non_path_safe = re.compile(r'[^A-Za-z0-9_.-]')
def encode_dataset_id(dataset_id):
"""Encode a dataset ID to a format suitable for file names.
"""
dataset_id = dataset_id.replace('_', '__')
dataset_id = re_non_path_safe.sub(lambda m: '_%X' % ord(m.group(0)),
dataset_id)
return dataset_id
def decode_dataset_id(dataset_id):
"""Decode a dataset ID encoded using `encode_dataset_id()`.
"""
dataset_id = list(dataset_id)
i = 0
while i < len(dataset_id):
if dataset_id[i] == '_':
if dataset_id[i + 1] == '_':
del dataset_id[i + 1]
else:
char_hex = dataset_id[i + 1:i + 3]
dataset_id[i + 1:i + 3] = []
char_hex = ''.join(char_hex)
dataset_id[i] = chr(int(char_hex, 16))
i += 1
return ''.join(dataset_id)
def hash_json(*args, **kwargs):
if not args:
dct = dict()
elif len(args) == 1:
dct = args[0]
assert isinstance(dct, dict)
else:
raise TypeError("Expected 1 positional argument, got %d" % len(args))
dct.update(**kwargs)
bytes_ = json.dumps(dct, sort_keys=True).encode('utf-8')
return hashlib.sha1(bytes_).hexdigest()
_re_html_link = re.compile(
r'<a [^>]*\bhref="(https?://[^"]+)"[^>]*>(.*?)</a>',
)
_re_html_tag = re.compile(
r'</?(?:a|acronym|br|div|em|h[1-5]|li|ol|p|span|ul)(?: [^>]*)?/?>',
)
_re_html_entities = re.compile(
r'&([A-Za-z]{2,35};)',
)
def _base_url(url):
if url.startswith('http://'):
url = url[7:]
elif url.startswith('https://'):
url = url[8:]
else:
url = url
return url.rstrip('/')
def strip_html(text):
# Replace links
def replace_link(m):
url, label = m.groups()
if _base_url(url) == _base_url(label):
return label
else:
return "%s (%s)" % (label, url)
text = _re_html_link.sub(replace_link, text)
# Strip tags
text = _re_html_tag.sub('', text)
# Fix entities
text = _re_html_entities.sub(
lambda x: html.entities.html5.get(x.group(1), x.group(0)),
text,
)
return text
def contextdecorator(factory, argname):
def inner(wrapped):
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
with factory() as ctx:
kwargs.update({argname: ctx})
return wrapped(*args, **kwargs)
return wrapper
return inner
def add_dataset_to_sup_index(es, dataset_id, metadata):
"""
Adds dataset to the supplementary Datamart indices: 'columns',
'spatial_coverage', and 'temporal_coverage'.
"""
DISCARD_DATASET_FIELDS = [
'columns', 'sample', 'materialize',
'spatial_coverage', 'temporal_coverage',
'manual_annotations',
]
DISCARD_COLUMN_FIELDS = ['plot']
common_dataset_metadata = dict(dataset_id=dataset_id)
for key, value in metadata.items():
if key not in DISCARD_DATASET_FIELDS:
common_dataset_metadata['dataset_' + key] = value
# 'columns' index
for column_index, column in enumerate(metadata['columns']):
column_metadata = dict(column)
for field in DISCARD_COLUMN_FIELDS:
column_metadata.pop(field, None)
column_metadata.update(common_dataset_metadata)
column_metadata['index'] = column_index
if 'coverage' in column_metadata:
column_metadata['coverage'] = [
dict(
num_range,
gte=num_range['range']['gte'],
lte=num_range['range']['lte'],
)
for num_range in column_metadata['coverage']
]
es.index(
'columns',
column_metadata
)
# 'spatial_coverage' index
if 'spatial_coverage' in metadata:
for spatial_coverage in metadata['spatial_coverage']:
spatial_coverage_metadata = dict()
spatial_coverage_metadata.update(common_dataset_metadata)
spatial_coverage_metadata.update(spatial_coverage)
ranges = []
# Keep in sync, search code for 279a32
if 'ranges' in spatial_coverage_metadata:
for spatial_range in spatial_coverage_metadata['ranges']:
coordinates = spatial_range['range']['coordinates']
ranges.append(dict(
spatial_range,
min_lon=coordinates[0][0],
max_lat=coordinates[0][1],
max_lon=coordinates[1][0],
min_lat=coordinates[1][1],
))
spatial_coverage_metadata['ranges'] = ranges
es.index(
'spatial_coverage',
spatial_coverage_metadata,
)
# 'temporal_coverage' index
if 'temporal_coverage' in metadata:
for temporal_coverage in metadata['temporal_coverage']:
temporal_coverage_metadata = dict()
temporal_coverage_metadata.update(common_dataset_metadata)
temporal_coverage_metadata.update(temporal_coverage)
temporal_coverage_metadata['ranges'] = [
dict(
temporal_range,
gte=temporal_range['range']['gte'],
lte=temporal_range['range']['lte'],
)
for temporal_range in temporal_coverage_metadata['ranges']
]
es.index(
'temporal_coverage',
temporal_coverage_metadata,
)
def add_dataset_to_index(es, dataset_id, metadata):
"""
Safely adds a dataset to all the Datamart indices.
"""
# 'datasets' index
es.index(
'datasets',
dict(metadata, id=dataset_id),
id=dataset_id,
)
add_dataset_to_sup_index(
es,
dataset_id,
metadata
)
def add_dataset_to_lazo_storage(es, id, metadata):
"""Adds a dataset to Lazo.
"""
es.index(
'lazo',
metadata,
id=id,
)
def delete_dataset_from_lazo(es, dataset_id, lazo_client):
query = {
'query': {
'bool': {
'must': [
{'term': {'dataset_id': dataset_id}},
{'term': {'structural_type': types.TEXT}}
],
'must_not': {
'term': {'semantic_types': types.DATE_TIME}
}
}
}
}
textual_columns = list()
# FIXME: Use search-after API here?
from_ = 0
while True:
hits = es.search(
index='columns',
body=query,
from_=from_,
size=10000,
)['hits']['hits']
from_ += len(hits)
for h in hits:
textual_columns.append(h['_source']['name'])
if len(hits) != 10000:
break
if textual_columns:
ack = lazo_client.remove_sketches(dataset_id, textual_columns)
if ack:
logger.info(
"Deleted %d documents from Lazo",
len(textual_columns)
)
else:
logger.info("Error while deleting documents from Lazo")
def delete_dataset_from_index(es, dataset_id, lazo_client=None):
"""
Safely deletes a dataset from the 'datasets' index,
including its corresponding information in 'columns', 'spatial_coverage',
and 'temporal_coverage' indices.
This function also connects to the Lazo index service
and deletes any corresponding sketch.
"""
if lazo_client:
# checking if there are any textual columns in the dataset
# remove them from the Lazo index service
delete_dataset_from_lazo(es, dataset_id, lazo_client)
# Remove from alternate index
try:
es.delete('pending', dataset_id)
except elasticsearch.NotFoundError:
pass
# deleting from 'datasets'
try:
es.delete('datasets', dataset_id)
except elasticsearch.NotFoundError:
return
# deleting from 'columns', 'spatial_coverage', and 'temporal_coverage'
query = {
'query': {
'term': {'dataset_id': dataset_id}
}
}
for index in (
'columns',
'spatial_coverage',
'temporal_coverage',
):
nb = es.delete_by_query(
index=index,
body=query,
)['deleted']
logger.info("Deleted %d documents from %s", nb, index)
| 29.866055 | 79 | 0.585734 |
acde842285fb087601b348424c958a5d56000b0a | 1,460 | py | Python | energy_check.py | thanasi/optics_tools | 0b47c11c5718df244f90cd9685979245662d8c9d | [
"MIT"
] | null | null | null | energy_check.py | thanasi/optics_tools | 0b47c11c5718df244f90cd9685979245662d8c9d | [
"MIT"
] | null | null | null | energy_check.py | thanasi/optics_tools | 0b47c11c5718df244f90cd9685979245662d8c9d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 8 08:46:10 2017
@author: thanasi
"""
import numpy as np
from pint import UnitRegistry
ureg = UnitRegistry()
Q = ureg.Quantity
# %%
# pulse energy
#e0 = Q(250, "mJ")
e0 = (Q(1,"mJ") * 8.1596 + Q(0.375,"mJ")) * 0.055
# pulse duration
tau = Q(4, "ns")
# peak power
# conservative estimate, assuming gaussian temporal profile
peak_pow = 2 * (e0 / tau).to("W")
# beam diameter
d0 = Q(7, "mm")
# beam area
A = (np.pi * d0**2 / 4).to("cm^2")
energy_density = (e0 / A).to("J/cm^2")
peak_pow_dens = (peak_pow / A).to("MW/cm^2")
print("-"*33)
print("E-Density: {:0.3g~}".format(energy_density))
print("P-Density: {:0.1f~}".format(peak_pow_dens))
print("-"*33)
# %%
# check necessary extinction for photodetector
# Thorlabs DET025A
# power damage threshold
p_dam_thresh = Q(18, "mW")
# energy damage threshold, given beam properties above
e_dam_thresh = (p_dam_thresh*tau/2).to("mJ")
# detector active area
det_area = Q(250, "um")**2
# maximum allowable power density
max_det_pow_dens = p_dam_thresh / det_area
# reduction factor needed from laser beam
red = (peak_pow_dens / max_det_pow_dens).to("").magnitude
red_OD = np.ceil(np.log10(red))
print("-"*33)
print("Max Peak Power: {:1.2g}".format(p_dam_thresh))
print("Max Beam Energy: {:1.2g}".format(e_dam_thresh))
print("Power Reduction Needed: {:1.2g}".format(red))
print("OD Needed: {:2.0f}".format(red_OD))
print("-"*33)
| 19.72973 | 59 | 0.664384 |
acde8432093fc737621b8855fa7479b5af10c9fa | 1,878 | py | Python | harness/tests/storage/test_storage_base.py | RAbraham/determined | 1161b667ed6d0242f70f9f15d58600f910c8d7f9 | [
"Apache-2.0"
] | 1,729 | 2020-04-27T17:36:40.000Z | 2022-03-31T05:48:39.000Z | harness/tests/storage/test_storage_base.py | ChrisW09/determined | 5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0 | [
"Apache-2.0"
] | 1,940 | 2020-04-27T17:34:14.000Z | 2022-03-31T23:02:28.000Z | harness/tests/storage/test_storage_base.py | ChrisW09/determined | 5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0 | [
"Apache-2.0"
] | 214 | 2020-04-27T19:57:28.000Z | 2022-03-29T08:17:16.000Z | import os
import pytest
from determined.common import storage
from determined.common.check import CheckFailedError
from determined.common.storage import StorageManager
def test_unknown_type() -> None:
config = {"type": "unknown"}
with pytest.raises(TypeError, match="Unknown storage type: unknown"):
storage.build(config, container_path=None)
def test_missing_type() -> None:
with pytest.raises(CheckFailedError, match="Missing 'type' parameter"):
storage.build({}, container_path=None)
def test_illegal_type() -> None:
config = {"type": 4}
with pytest.raises(CheckFailedError, match="must be a string"):
storage.build(config, container_path=None)
def test_build_with_container_path() -> None:
config = {"type": "shared_fs", "host_path": "/host_path", "storage_path": "storage_path"}
manager = storage.build(config, container_path=None)
assert manager._base_path == "/host_path/storage_path"
manager = storage.build(config, container_path="/container_path")
assert manager._base_path == "/container_path/storage_path"
def test_list_directory() -> None:
root = os.path.join(os.path.dirname(__file__), "fixtures")
assert set(StorageManager._list_directory(root)) == {
"root.txt",
"nested/",
"nested/nested.txt",
"nested/another.txt",
}
def test_list_directory_on_file() -> None:
root = os.path.join(os.path.dirname(__file__), "fixtures", "root.txt")
assert os.path.exists(root)
with pytest.raises(CheckFailedError, match="must be an extant directory"):
StorageManager._list_directory(root)
def test_list_nonexistent_directory() -> None:
root = "./non-existent-directory"
assert not os.path.exists(root)
with pytest.raises(CheckFailedError, match="must be an extant directory"):
StorageManager._list_directory(root)
| 32.37931 | 93 | 0.709265 |
acde86a1ee3179ad9a97694a0e5abe4767bb9d59 | 510 | py | Python | src/sentry/conf/locale.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 4 | 2019-05-27T13:55:07.000Z | 2021-03-30T07:05:09.000Z | src/sentry/conf/locale.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 196 | 2019-06-10T08:34:10.000Z | 2022-02-22T01:26:13.000Z | src/sentry/conf/locale.py | AlexWayfer/sentry | ef935cda2b2e960bd602fda590540882d1b0712d | [
"BSD-3-Clause"
] | 1 | 2017-02-09T06:36:57.000Z | 2017-02-09T06:36:57.000Z | from __future__ import absolute_import
import os
import json
import sentry
# change locale file dir name to locale code
def dirname_to_local(dir_name):
if '_' in dir_name:
pre, post = dir_name.split('_', 1)
dir_name = u'{}-{}'.format(pre, post.lower())
return dir_name
with open(os.path.join(os.path.dirname(sentry.__file__), 'locale', 'catalogs.json'), 'r') as f:
CATALOGS = json.load(f)['supported_locales']
CATALOGS = [dirname_to_local(dirname) for dirname in CATALOGS]
| 26.842105 | 95 | 0.696078 |
acde86d38b6c9504b626f9a6d69188b29d526e1c | 330 | py | Python | block-storage-attacher/tests/e2e/scripts/writeOPENSOURCE.py | Nadine2016/ibmcloud-storage-utilities | f0160d4e95cf1b3b3881d28dd1bac9ed99e4b7cd | [
"Apache-2.0"
] | null | null | null | block-storage-attacher/tests/e2e/scripts/writeOPENSOURCE.py | Nadine2016/ibmcloud-storage-utilities | f0160d4e95cf1b3b3881d28dd1bac9ed99e4b7cd | [
"Apache-2.0"
] | null | null | null | block-storage-attacher/tests/e2e/scripts/writeOPENSOURCE.py | Nadine2016/ibmcloud-storage-utilities | f0160d4e95cf1b3b3881d28dd1bac9ed99e4b7cd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import yaml
f = open('OPENSOURCE', 'w')
with open("glide.lock", 'r') as stream:
try:
data = yaml.load(stream, Loader=yaml.Loader)
for dep in data["imports"]:
f.write(dep["name"] + "," + dep["version"] + '\n')
except yaml.YAMLError as exc:
print(exc)
f.close()
| 20.625 | 62 | 0.560606 |
acde86f8e88361691db77f0995b1b0bbbd72cdf0 | 6,114 | py | Python | docs/conf.py | egurra/thermostate | 87973d3341b2ba6b0f76388b9f1f0aae029d27a2 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | egurra/thermostate | 87973d3341b2ba6b0f76388b9f1f0aae029d27a2 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | egurra/thermostate | 87973d3341b2ba6b0f76388b9f1f0aae029d27a2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# thermostate documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 10 10:16:52 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import pkg_resources
import datetime
import shutil
shutil.copy2('../CHANGELOG.md', 'CHANGELOG.md')
shutil.copy2('../CODE_OF_CONDUCT.md', 'CODE_OF_CONDUCT.md')
shutil.copy2('../CONTRIBUTING.md', 'CONTRIBUTING.md')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
on_travis = os.environ.get('TRAVIS') == 'True'
if not on_travis:
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'nbsphinx',
]
# add_function_parentheses = False
autodoc_default_flags = ['members']
# autodoc_member_order = 'bysource'
autoclass_content = 'class'
napoleon_numpy_docstring = True
napoleon_google_docstring = False
nbsphinx_allow_errors = True
nbsphinx_execute = 'always'
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'pint': ('https://pint.readthedocs.io/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'thermostate'
author = 'Bryan W. Weber'
this_year = datetime.date.today().year
copyright = '{}, {}'.format(this_year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
try:
release = pkg_resources.get_distribution(project).version
except pkg_resources.DistributionNotFound:
release = 'unknown'
# The short X.Y version.
version = '.'.join(release.split('.')[:1])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'bryanwweber',
'github_repo': 'thermostate',
'github_banner': True,
'github_button': True,
'show_powered_by': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'thermostatedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'thermostate.tex', 'thermostate Documentation',
'Bryan W. Weber', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'thermostate', 'thermostate Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'thermostate', 'thermostate Documentation',
author, 'thermostate', 'One line description of project.',
'Miscellaneous'),
]
| 30.723618 | 79 | 0.689238 |
acde871284b8c5b64fcb69aefa59c52bb6e7e76a | 62,571 | py | Python | sympy/core/function.py | Timeroot/sympy | f95bf4bbc548d326f4643d22faec32aca7880187 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/function.py | Timeroot/sympy | f95bf4bbc548d326f4643d22faec32aca7880187 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/function.py | Timeroot/sympy | f95bf4bbc548d326f4643d22faec32aca7880187 | [
"BSD-3-Clause"
] | null | null | null | """
There are several types of functions:
1) defined function like exp or sin that has a name and body
(in the sense that function can be evaluated).
e = exp
2) undefined function with a name but no body. Undefined
functions can be defined using a Function class as follows:
f = Function('f')
(the result will be a Function instance)
3) this isn't implemented yet: anonymous function or lambda function that has
no name but has body with dummy variables. Examples of anonymous function
creation:
f = Lambda(x, exp(x)*x)
f = Lambda(exp(x)*x) # free symbols in the expression define the number of arguments
f = exp * Lambda(x,x)
4) isn't implemented yet: composition of functions, like (sin+cos)(x), this
works in sympy core, but needs to be ported back to SymPy.
Example:
>>> import sympy
>>> f = sympy.Function("f")
>>> from sympy.abc import x
>>> f(x)
f(x)
>>> print sympy.srepr(f(x).func)
Function('f')
>>> f(x).args
(x,)
"""
from core import BasicMeta, C
from assumptions import WithAssumptions
from basic import Basic
from singleton import S
from expr import Expr, AtomicExpr
from decorators import _sympifyit, deprecated
from compatibility import iterable,is_sequence
from cache import cacheit
from numbers import Rational
from sympy.core.containers import Tuple
from sympy.utilities import default_sort_key
from sympy.utilities.iterables import uniq
from sympy import mpmath
import sympy.mpmath.libmp as mlib
class PoleError(Exception):
    """Signals that an expression has a pole at the point of interest.

    Defined here for use by other parts of SymPy; nothing in the visible
    portion of this module raises it.  NOTE(review): presumably raised
    during series/limit computations -- confirm at the call sites.
    """
    pass
class ArgumentIndexError(ValueError):
    """Signals an invalid argument-number operation on a Function.

    The exception arguments are ``(function, argument_number)``; both are
    interpolated into the error message.
    """

    def __str__(self):
        # args[0] is the function, args[1] the offending argument number.
        func, argindex = self.args[0], self.args[1]
        return ("Invalid operation with argument number %s for Function %s"
                % (argindex, func))
class FunctionClass(WithAssumptions):
    """
    Base class for function classes. FunctionClass is a subclass of type.

    Use Function('<function name>' [ , signature ]) to create
    undefined function classes.
    """
    __metaclass__ = BasicMeta
    # Shortcut to the plain ``type`` constructor, bypassing metaclass logic.
    _new = type.__new__

    def __repr__(cls):
        # Instances of this metaclass are classes; show their bare name.
        return cls.__name__

    @deprecated
    def __contains__(self, obj):
        # Deprecated: ``obj in cls`` historically meant equality with the
        # class itself.
        return (self == obj)
class Application(Basic):
    """
    Base class for applied functions.

    An ``Application`` instance represents the result of applying some head
    (a function of any kind) to a tuple of arguments.
    """
    __metaclass__ = FunctionClass
    __slots__ = []

    is_Function = True

    # Number of arguments the function accepts; None means "any number".
    nargs = None

    @cacheit
    def __new__(cls, *args, **options):
        # Sympify all arguments up front so eval() only sees Basic objects.
        args = [sympify(arg) for arg in args]

        # Strip options that are meaningful elsewhere, not to Basic.__new__.
        # (these lines should be refactored)
        for opt in ("nargs", "dummy", "comparable", "noncommutative",
                    "commutative"):
            options.pop(opt, None)

        # Unless evaluation is explicitly disabled, give the class a chance
        # to return a canonical (possibly simplified) result.
        if options.pop('evaluate', True):
            evaluated = cls.eval(*args)
            if evaluated is not None:
                return evaluated

        return super(Application, cls).__new__(cls, *args, **options)

    @classmethod
    def eval(cls, *args):
        """
        Return a canonical form of cls applied to arguments args, or None.

        This hook is called while the class cls is being instantiated; it
        should return either some simplified instance (possibly of some
        other class), or None if the application should be left unevaluated.

        An eval() for the function "sign" could look like::

            @classmethod
            def eval(cls, arg):
                if arg is S.NaN:
                    return S.NaN
                if arg is S.Zero: return S.Zero
                if arg.is_positive: return S.One
                if arg.is_negative: return S.NegativeOne
                if isinstance(arg, C.Mul):
                    coeff, terms = arg.as_coeff_mul()
                    if coeff is not S.One:
                        return cls(coeff) * cls(arg._new_rawargs(*terms))

        """
        return None

    @property
    def func(self):
        # The head of an applied function is its class.
        return self.__class__

    def _eval_subs(self, old, new):
        if self == old:
            return new
        if old.is_Function and new.is_Function and old == self.func:
            # Replacing the head itself, e.g. f(x).subs(f, g) -> g(x),
            # provided the new head accepts this number of arguments.
            nargs = len(self.args)
            if (new.nargs is None or nargs == new.nargs or
                    (isinstance(new.nargs, tuple) and nargs in new.nargs)):
                return new(*self.args)
        # Otherwise substitute inside each argument.
        return self.func(*[arg.subs(old, new) for arg in self.args])

    @deprecated
    def __contains__(self, obj):
        if self.func == obj:
            return True
        return super(Application, self).__contains__(obj)
class Function(Application, Expr):
"""
Base class for applied numeric functions.
Constructor of undefined function classes.
"""
@property
def _diff_wrt(self):
"""Allow derivatives wrt functions.
Examples
========
>>> from sympy import Function, Symbol
>>> f = Function('f')
>>> x = Symbol('x')
>>> f(x)._diff_wrt
True
"""
return True
    @cacheit
    def __new__(cls, *args, **options):
        """Create an applied function.

        ``Function('name')`` calls are dispatched to ``UndefinedFunction``;
        otherwise the argument count is checked against ``cls.nargs``, the
        arguments are sympified, ``cls.eval`` may return a canonical result,
        and the result is automatically evalf'd when every argument reports
        a positive precision via ``_should_evalf``.
        """
        # Handle calls like Function('f')
        if cls is Function:
            return UndefinedFunction(*args)

        if cls.nargs is not None:
            # nargs may be a single int or a tuple of allowed counts;
            # normalize to a tuple before checking.
            if isinstance(cls.nargs, tuple):
                nargs = cls.nargs
            else:
                nargs = (cls.nargs,)

            n = len(args)

            if n not in nargs:
                # XXX: exception message must be in exactly this format to make it work with
                # NumPy's functions like vectorize(). Ideal solution would be just to attach
                # metadata to the exception and change NumPy to take advantage of this.
                raise TypeError('%(name)s takes exactly %(args)s argument%(plural)s (%(given)s given)' %
                       {'name': cls, 'args': cls.nargs, 'plural': 's'*(n != 1), 'given': n})

        args = map(sympify, args)
        evaluate = options.pop('evaluate', True)
        if evaluate:
            evaluated = cls.eval(*args)
            if evaluated is not None:
                return evaluated
        result = super(Application, cls).__new__(cls, *args, **options)
        # pr is the highest precision requested by any argument; pr2 > 0
        # only when *every* argument wants automatic evalf (see
        # _should_evalf, which returns -1 for non-numeric arguments).
        pr = max(cls._should_evalf(a) for a in args)
        pr2 = min(cls._should_evalf(a) for a in args)
        if evaluate and pr2 > 0:
            return result.evalf(mlib.libmpf.prec_to_dps(pr))
        return result
    @classmethod
    def _should_evalf(cls, arg):
        """
        Decide if the function should automatically evalf().
        By default (in this implementation), this happens if (and only if) the
        ARG is a floating point number.
        This function is used by __new__.
        Returns the binary precision of the Float (a positive int), or -1
        when the argument would not trigger numeric evaluation.
        """
        if arg.is_Float:
            return arg._prec
        if not arg.is_Add:
            return -1
        # An Add whose real or imaginary part carries a Float (e.g. 2 + 3.5*I)
        # also triggers evaluation; use the largest precision found.
        re, im = arg.as_real_imag()
        l = [a._prec for a in [re, im] if a.is_Float]
        l.append(-1)
        return max(l)
@classmethod
def class_key(cls):
funcs = {
'exp': 10, 'log': 11,
'sin': 20, 'cos': 21, 'tan': 22, 'cot': 23,
'sinh': 30, 'cosh': 31, 'tanh': 32, 'coth': 33,
'conjugate': 40, 're': 41, 'im': 42, 'arg': 43,
}
name = cls.__name__
try:
i = funcs[name]
except KeyError:
nargs = cls.nargs
if nargs is None:
i = 0
else:
i = 10000
return 4, i, name
@property
def is_commutative(self):
if all(getattr(t, 'is_commutative') for t in self.args):
return True
else:
return False
    def _eval_evalf(self, prec):
        # Lookup mpmath function based on name
        fname = self.func.__name__
        try:
            if not hasattr(mpmath, fname):
                # Some sympy names differ from mpmath's; translate them.
                from sympy.utilities.lambdify import MPMATH_TRANSLATIONS
                fname = MPMATH_TRANSLATIONS[fname]
            func = getattr(mpmath, fname)
        except (AttributeError, KeyError):
            # No mpmath counterpart: fall back to a user-supplied numeric
            # implementation (_imp_), if the class provides one.
            try:
                return C.Float(self._imp_(*self.args), prec)
            except (AttributeError, TypeError):
                return
        # Convert all args to mpf or mpc
        try:
            args = [arg._to_mpmath(prec) for arg in self.args]
        except ValueError:
            return
        # Set mpmath precision and apply. Make sure precision is restored
        # afterwards
        orig = mpmath.mp.prec
        try:
            mpmath.mp.prec = prec
            v = func(*args)
        finally:
            mpmath.mp.prec = orig
        return Expr._from_mpmath(v, prec)
def _eval_is_comparable(self):
if self.is_Function:
r = True
for s in self.args:
c = s.is_comparable
if c is None: return
if not c: r = False
return r
return
    def _eval_derivative(self, s):
        # f(x).diff(s) -> x.diff(s) * f.fdiff(1)(s)
        # Chain rule: sum over every argument position i of
        # fdiff(i) * d(arg_i)/ds.
        i = 0
        l = []
        for a in self.args:
            i += 1
            da = a.diff(s)
            if da is S.Zero:
                # This argument does not depend on s: no contribution.
                continue
            try:
                df = self.fdiff(i)
            except ArgumentIndexError:
                # Fall back to the generic, unevaluated fdiff.
                df = Function.fdiff(self, i)
            l.append(df * da)
        return Add(*l)
def _eval_is_commutative(self):
r = True
for a in self._args:
c = a.is_commutative
if c is None: return None
if not c: r = False
return r
def as_base_exp(self):
return self, S.One
def _eval_aseries(self, n, args0, x, logx):
"""
Compute an asymptotic expansion around args0, in terms of self.args.
This function is only used internally by _eval_nseries and should not
be called directly; derived classes can overwrite this to implement
asymptotic expansions.
"""
raise PoleError('Asymptotic expansion of %s around %s '
'not implemented.' % (type(self), args0))
    def _eval_nseries(self, x, n, logx):
        """
        This function does compute series for multivariate functions,
        but the expansion is always in terms of *one* variable.
        Examples:
        >>> from sympy import atan2, O
        >>> from sympy.abc import x, y
        >>> atan2(x, y).series(x, n=2)
        atan2(0, y) + x/y + O(x**2)
        >>> atan2(x, y).series(y, n=2)
        -y/x + atan2(x, 0) + O(y**2)
        This function also computes asymptotic expansions, if necessary
        and possible:
        >>> from sympy import loggamma
        >>> loggamma(1/x)._eval_nseries(x,0,None)
        -1/x - log(x)/x + log(x)/2 + O(1)
        """
        if self.func.nargs is None:
            raise NotImplementedError('series for user-defined \
functions are not supported.')
        args = self.args
        # Limits of the arguments as x -> 0; an unbounded limit routes us to
        # the asymptotic branch below.
        args0 = [t.limit(x, 0) for t in args]
        if any(t.is_bounded == False for t in args0):
            from sympy import Dummy, oo, zoo, nan
            a = [t.compute_leading_term(x, logx=logx) for t in args]
            a0 = [t.limit(x, 0) for t in a]
            if any ([t.has(oo, -oo, zoo, nan) for t in a0]):
                return self._eval_aseries(n, args0, x, logx)._eval_nseries(x, n, logx)
            # Careful: the argument goes to oo, but only logarithmically so. We
            # are supposed to do a power series expansion "around the
            # logarithmic term". e.g.
            # f(1+x+log(x))
            # -> f(1+logx) + x*f'(1+logx) + O(x**2)
            # where 'logx' is given in the argument
            a = [t._eval_nseries(x, n, logx) for t in args]
            z = [r - r0 for (r, r0) in zip(a, a0)]
            p = [Dummy() for t in z]
            q = []
            v = None
            # Replace the single x-dependent argument perturbation by a Dummy
            # (v); more than one x-dependent argument is not supported here.
            for ai, zi, pi in zip(a0, z, p):
                if zi.has(x):
                    if v is not None: raise NotImplementedError
                    q.append(ai + pi)
                    v = pi
                else:
                    q.append(ai)
            e1 = self.func(*q)
            if v is None:
                return e1
            s = e1._eval_nseries(v, n, logx)
            o = s.getO()
            s = s.removeO()
            s = s.subs(v, zi).expand() + C.Order(o.expr.subs(v, zi), x)
            return s
        if (self.func.nargs == 1 and args0[0]) or self.func.nargs > 1:
            e = self
            e1 = e.expand()
            if e == e1:
                #for example when e = sin(x+1) or e = sin(cos(x))
                #let's try the general algorithm
                term = e.subs(x, S.Zero)
                if term.is_bounded is False or term is S.NaN:
                    raise PoleError("Cannot expand %s around 0" % (self))
                series = term
                fact = S.One
                # Plain Taylor expansion by repeated differentiation at 0.
                for i in range(n-1):
                    i += 1
                    fact *= Rational(i)
                    e = e.diff(x)
                    subs = e.subs(x, S.Zero)
                    if subs is S.NaN:
                        # try to evaluate a limit if we have to
                        subs = e.limit(x, S.Zero)
                    if subs.is_bounded is False:
                        raise PoleError("Cannot expand %s around 0" % (self))
                    term = subs*(x**i)/fact
                    term = term.expand()
                    series += term
                return series + C.Order(x**n, x)
            return e1.nseries(x, n=n, logx=logx)
        # Univariate case expanding around 0: build the series from the
        # class's taylor_term method.
        arg = self.args[0]
        l = []
        g = None
        for i in xrange(n+2):
            g = self.taylor_term(i, arg, g)
            g = g.nseries(x, n=n, logx=logx)
            l.append(g)
        return Add(*l) + C.Order(x**n, x)
def _eval_expand_basic(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_basic'):
newterm = term._eval_expand_basic(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.func(*terms)
def _eval_expand_power_exp(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_exp'):
newterm = term._eval_expand_power_exp(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.func(*terms)
def _eval_expand_power_base(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_base'):
newterm = term._eval_expand_power_base(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.func(*terms)
def _eval_expand_mul(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_mul'):
newterm = term._eval_expand_mul(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.func(*terms)
def _eval_expand_multinomial(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_multinomail'):
newterm = term._eval_expand_multinomial(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.func(*terms)
def _eval_expand_log(self, deep=True, **hints):
if not deep:
return self
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_log'):
newterm = term._eval_expand_log(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.func(*terms)
def _eval_expand_complex(self, deep=True, **hints):
if deep:
func = self.func(*[ a.expand(deep, **hints) for a in self.args ])
else:
func = self.func(*self.args)
return C.re(func) + S.ImaginaryUnit * C.im(func)
def _eval_expand_trig(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_trig'):
newterm = term._eval_expand_trig(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.func(*terms)
def _eval_expand_func(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_func'):
newterm = term._eval_expand_func(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.func(*terms)
    def _eval_rewrite(self, pattern, rule, **hints):
        # With deep=True, rewrite the arguments first.
        if hints.get('deep', False):
            args = [ a._eval_rewrite(pattern, rule, **hints) for a in self.args ]
        else:
            args = self.args
        # Apply the named rewrite rule (a method such as _eval_rewrite_as_*)
        # when this function matches the requested pattern (or no pattern
        # was given).
        if pattern is None or isinstance(self.func, pattern):
            if hasattr(self, rule):
                rewritten = getattr(self, rule)(*args)
                if rewritten is not None:
                    return rewritten
        # No rule applied: rebuild with the (possibly rewritten) arguments.
        return self.func(*args)
    def fdiff(self, argindex=1):
        """Return the derivative of the function w.r.t. its argindex-th
        argument (1-based), as an unevaluated Derivative.
        Raises ArgumentIndexError when argindex is out of range.
        """
        if self.nargs is not None:
            if isinstance(self.nargs, tuple):
                # With several admissible counts, validate against the largest.
                nargs = self.nargs[-1]
            else:
                nargs = self.nargs
            if not (1<=argindex<=nargs):
                raise ArgumentIndexError(self, argindex)
        if not self.args[argindex-1]._diff_wrt:
            # See issue 1525 and issue 1620 and issue 2501
            # The argument cannot be differentiated w.r.t. directly: swap in
            # a Dummy, differentiate, and record the substitution via Subs.
            arg_dummy = C.Dummy('xi_%i' % argindex)
            return Subs(Derivative(
                self.subs(self.args[argindex-1], arg_dummy),
                arg_dummy), arg_dummy, self.args[argindex-1])
        return Derivative(self,self.args[argindex-1],evaluate=False)
def _eval_as_leading_term(self, x):
"""General method for the leading term"""
# XXX This seems broken to me!
arg = self.args[0].as_leading_term(x)
if C.Order(1,x).contains(arg):
return arg
else:
return self.func(arg)
@classmethod
def taylor_term(cls, n, x, *previous_terms):
"""General method for the taylor term.
This method is slow, because it differentiates n-times. Subclasses can
redefine it to make it faster by using the "previous_terms".
"""
x = sympify(x)
return cls(x).diff(x, n).subs(x, 0) * x**n / C.factorial(n)
class AppliedUndef(Function):
    """
    Base class for expressions resulting from the application of an undefined function.
    """
    def __new__(cls, *args, **options):
        sympified = map(sympify, args)
        obj = Expr.__new__(cls, *sympified, **options)
        # An undefined function accepts exactly as many arguments as it was
        # first called with.
        obj.nargs = len(sympified)
        return obj
class UndefinedFunction(FunctionClass):
    """
    The (meta)class of undefined functions.
    """
    def __new__(mcl, name):
        # Build a brand-new class named ``name`` deriving from AppliedUndef.
        bases = (AppliedUndef,)
        return BasicMeta.__new__(mcl, name, bases, {})
class WildFunction(Function, AtomicExpr):
    """
    WildFunction() matches any expression but another WildFunction()
    XXX is this as intended, does it work ?
    """
    nargs = 1
    def __new__(cls, name, **assumptions):
        obj = Function.__new__(cls, name, **assumptions)
        obj.name = name
        return obj
    def matches(self, expr, repl_dict={}):
        # Only match objects whose declared argument count agrees with ours.
        if self.nargs is not None:
            if not hasattr(expr, 'nargs') or self.nargs != expr.nargs:
                return None
        matched = repl_dict.copy()
        matched[self] = expr
        return matched
    @property
    def is_number(self):
        return False
class Derivative(Expr):
"""
Carries out differentiation of the given expression with respect to symbols.
expr must define ._eval_derivative(symbol) method that returns
the differentiation result. This function only needs to consider the
non-trivial case where expr contains symbol and it should call the diff()
method internally (not _eval_derivative); Derivative should be the only
one to call _eval_derivative.
Ordering of variables
---------------------
If evaluate is set to True and the expression can not be evaluated, the
list of differentiation symbols will be sorted, that is, the expression is
assumed to have continuous derivatives up to the order asked. This sorting
assumes that derivatives wrt Symbols commute, derivatives wrt non-Symbols
commute, but Symbol and non-Symbol derivatives don't commute with each
other.
Derivative wrt non-Symbols
--------------------------
This class also allows derivatives wrt non-Symbols that have _diff_wrt
set to True, such as Function and Derivative. When a derivative wrt a non-
Symbol is attempted, the non-Symbol is temporarily converted to a Symbol
while the differentiation is performed.
Note that this may seem strange, that Derivative allows things like
f(g(x)).diff(g(x)), or even f(cos(x)).diff(cos(x)). The motivation for
allowing this syntax is to make it easier to work with variational calculus
(i.e., the Euler-Lagrange method). The best way to understand this is that
the action of derivative with respect to a non-Symbol is defined by the
above description: the object is substituted for a Symbol and the
derivative is taken with respect to that. This action is only allowed for
objects for which this can be done unambiguously, for example Function and
Derivative objects. Note that this leads to what may appear to be
mathematically inconsistent results. For example::
>>> from sympy import cos, sin, sqrt
>>> from sympy.abc import x
>>> (2*cos(x)).diff(cos(x))
2
>>> (2*sqrt(1 - sin(x)**2)).diff(cos(x))
0
This appears wrong because in fact 2*cos(x) and 2*sqrt(1 - sin(x)**2) are
identically equal. However this is the wrong way to think of this. Think
of it instead as if we have something like this::
>>> from sympy.abc import c, s
>>> def F(u):
... return 2*u
...
>>> def G(u):
... return 2*sqrt(1 - u**2)
...
>>> F(cos(x))
2*cos(x)
>>> G(sin(x))
2*sqrt(-sin(x)**2 + 1)
>>> F(c).diff(c)
2
>>> F(c).diff(c)
2
>>> G(s).diff(c)
0
>>> G(sin(x)).diff(cos(x))
0
Here, the Symbols c and s act just like the functions cos(x) and sin(x),
respectively. Think of 2*cos(x) as f(c).subs(c, cos(x)) (or f(c) *at*
c = cos(x)) and 2*sqrt(1 - sin(x)**2) as g(s).subs(s, sin(x)) (or g(s) *at*
s = sin(x)), where f(u) == 2*u and g(u) == 2*sqrt(1 - u**2). Here, we
define the function first and evaluate it at the function, but we can
actually unambiguously do this in reverse in SymPy, because
expr.subs(Function, Symbol) is well-defined: just structurally replace the
function everywhere it appears in the expression.
This is actually the same notational convenience used in the Euler-Lagrange
method when one says F(t, f(t), f'(t)).diff(f(t)). What is actually meant
is that the expression in question is represented by some F(t, u, v) at
u = f(t) and v = f'(t), and F(t, f(t), f'(t)).diff(f(t)) simply means
F(t, u, v).diff(u) at u = f(t).
We do not allow to take derivative with respect to expressions where this
is not so well defined. For example, we do not allow expr.diff(x*y)
because there are multiple ways of structurally defining where x*y appears
in an expression, some of which may surprise the reader (for example, a
very strict definition would have that (x*y*z).diff(x*y) == 0).
>>> from sympy.abc import x, y, z
>>> (x*y*z).diff(x*y)
Traceback (most recent call last):
...
ValueError: Can't differentiate wrt the variable: x*y, 1
Note that this definition also fits in nicely with the definition of the
chain rule. Note how the chain rule in SymPy is defined using unevaluated
Subs objects::
>>> from sympy import symbols, Function
>>> f, g = symbols('f g', cls=Function)
>>> f(2*g(x)).diff(x)
2*Derivative(g(x), x)*Subs(Derivative(f(_xi_1), _xi_1), (_xi_1,), (2*g(x),))
This says that the derivative of f(2*g(x)) with respect to x is
2*g(x).diff(x) times f(u).diff(u) at u = 2*g(x). Note that this last part
is exactly the same as our definition of a derivative with respect to a
function given above, except the derivative is at a non-Function 2*g(x).
If we instead computed f(g(x)).diff(x), we would be able to express the Subs
as simply f(g(x)).diff(g(x)). Therefore, SymPy does this when it's
possible::
>>> f(g(x)).diff(x)
Derivative(f(g(x)), g(x))*Derivative(g(x), x)
Finally, note that, to be consistent with variational calculus, and to
ensure that the definition of substituting a Function for a Symbol in an
expression is well-defined, derivatives of functions are assumed to not be
related to the function. In other words, we have::
>>> from sympy import diff
>>> diff(f(x), x).diff(f(x))
0
The same is actually true for derivatives of different orders::
>>> diff(f(x), x, 2).diff(diff(f(x), x, 1))
0
>>> diff(f(x), x, 1).diff(diff(f(x), x, 2))
0
Note, any class can allow derivatives to be taken with respect to itself.
See the docstring of Expr._diff_wrt.
Examples
========
Some basic examples:
>>> from sympy import Derivative, Symbol, Function
>>> f = Function('f')
>>> g = Function('g')
>>> x = Symbol('x')
>>> y = Symbol('y')
>>> Derivative(x**2, x, evaluate=True)
2*x
>>> Derivative(Derivative(f(x,y), x), y)
Derivative(f(x, y), x, y)
>>> Derivative(f(x), x, 3)
Derivative(f(x), x, x, x)
>>> Derivative(f(x, y), y, x, evaluate=True)
Derivative(f(x, y), x, y)
Now some derivatives wrt functions:
>>> Derivative(f(x)**2, f(x), evaluate=True)
2*f(x)
>>> Derivative(f(g(x)), x, evaluate=True)
Derivative(f(g(x)), g(x))*Derivative(g(x), x)
"""
is_Derivative = True
@property
def _diff_wrt(self):
"""Allow derivatives wrt Derivatives if it contains a function.
Examples
========
>>> from sympy import Function, Symbol, Derivative
>>> f = Function('f')
>>> x = Symbol('x')
>>> Derivative(f(x),x)._diff_wrt
True
>>> Derivative(x**2,x)._diff_wrt
False
"""
if self.expr.is_Function:
return True
else:
return False
    def __new__(cls, expr, *variables, **assumptions):
        expr = sympify(expr)
        # There are no variables, we differentiate wrt all of the free symbols
        # in expr.
        if not variables:
            variables = expr.free_symbols
            if len(variables) != 1:
                raise ValueError("specify differentiation variables to differentiate %s" % expr)
        # Standardize the variables by sympifying them and making appending a
        # count of 1 if there is only one variable: diff(e,x)->diff(e,x,1).
        variables = list(sympify(variables))
        if not variables[-1].is_Integer or len(variables) == 1:
            variables.append(S.One)
        # Split the list of variables into a list of the variables we are diff
        # wrt, where each element of the list has the form (s, count) where
        # s is the entity to diff wrt and count is the order of the
        # derivative.
        variable_count = []
        all_zero = True
        i = 0
        while i < len(variables) - 1: # process up to final Integer
            v, count = variables[i: i+2]
            iwas = i
            if v._diff_wrt:
                # We need to test the more specific case of count being an
                # Integer first.
                if count.is_Integer:
                    count = int(count)
                    i += 2
                elif count._diff_wrt:
                    count = 1
                    i += 1
            if i == iwas: # didn't get an update because of bad input
                raise ValueError("Can't differentiate wrt the variable: %s, %s" % (v, count))
            variable_count.append((v, count))
            if all_zero and not count == 0:
                all_zero = False
        # We make a special case for 0th derivative, because there is no
        # good way to unambiguously print this.
        if all_zero:
            return expr
        # Pop evaluate because it is not really an assumption and we will need
        # to track use it carefully below.
        evaluate = assumptions.pop('evaluate', False)
        # Look for a quick exit if there are symbols that don't appear in
        # expression at all. Note, this cannot check non-symbols like
        # functions and Derivatives as those can be created by intermediate
        # derivatives.
        if evaluate:
            symbol_set = set(sc[0] for sc in variable_count if sc[0].is_Symbol)
            if symbol_set.difference(expr.free_symbols):
                return S.Zero
        # We make a generator so as to only generate a variable when necessary.
        # If a high order of derivative is requested and the expr becomes 0
        # after a few differentiations, then we won't need the other variables.
        variablegen = (v for v, count in variable_count for i in xrange(count))
        if expr.is_commutative:
            assumptions['commutative'] = True
        # If we can't compute the derivative of expr (but we wanted to) and
        # expr is itself not a Derivative, finish building an unevaluated
        # derivative class by calling Expr.__new__.
        if (not (hasattr(expr, '_eval_derivative') and evaluate) and
            (not isinstance(expr, Derivative))):
            variables = list(variablegen)
            # If we wanted to evaluate, we sort the variables into standard
            # order for later comparisons. This is too agressive if evaluate
            # is False, so we don't do it in that case.
            if evaluate:
                #TODO: check if assumption of discontinuous derivatives exist
                variables = cls._sort_variables(variables)
            # Here we *don't* need to reinject evaluate into assumptions
            # because we are done with it and it is not an assumption that
            # Expr knows about.
            obj = Expr.__new__(cls, expr, *variables, **assumptions)
            return obj
        # Compute the derivative now by repeatedly calling the
        # _eval_derivative method of expr for each variable. When this method
        # returns None, the derivative couldn't be computed wrt that variable
        # and we save the variable for later.
        unhandled_variables = []
        # Once we encounter a non_symbol that is unhandled, we stop taking
        # derivatives entirely. This is because derivatives wrt functions
        # don't commute with derivatives wrt symbols and we can't safely
        # continue.
        unhandled_non_symbol = False
        for v in variablegen:
            is_symbol = v.is_Symbol
            if unhandled_non_symbol:
                obj = None
            else:
                if not is_symbol:
                    # Temporarily replace the non-Symbol entity by a Symbol so
                    # _eval_derivative can handle it, then swap back below.
                    # NOTE(review): 'i' here is left over from the parsing
                    # loop above (the generator expression's loop variable is
                    # local in Python 2) -- looks suspicious; confirm intended.
                    new_v = C.Symbol('diff_wrt_%i' % i)
                    expr = expr.subs(v, new_v)
                    old_v = v
                    v = new_v
                obj = expr._eval_derivative(v)
                if not is_symbol:
                    if obj is not None:
                        obj = obj.subs(v, old_v)
                    v = old_v
            if obj is None:
                unhandled_variables.append(v)
                if not is_symbol:
                    unhandled_non_symbol = True
            elif obj is S.Zero:
                return S.Zero
            else:
                expr = obj
        if unhandled_variables:
            unhandled_variables = cls._sort_variables(unhandled_variables)
            expr = Expr.__new__(cls, expr, *unhandled_variables, **assumptions)
        else:
            # We got a Derivative at the end of it all, and we rebuild it by
            # sorting its variables.
            if isinstance(expr, Derivative):
                expr = Derivative(
                    expr.args[0], *cls._sort_variables(expr.args[1:])
                )
        return expr
    @classmethod
    def _sort_variables(cls, vars):
        """Sort variables, but disallow sorting of non-symbols.
        When taking derivatives, the following rules usually hold:
        * Derivative wrt different symbols commute.
        * Derivative wrt different non-symbols commute.
        * Derivatives wrt symbols and non-symbols don't commute.
        Examples
        --------
        >>> from sympy import Derivative, Function, symbols
        >>> vsort = Derivative._sort_variables
        >>> x, y, z = symbols('x y z')
        >>> f, g, h = symbols('f g h', cls=Function)
        >>> vsort((x,y,z))
        [x, y, z]
        >>> vsort((h(x),g(x),f(x)))
        [f(x), g(x), h(x)]
        >>> vsort((z,y,x,h(x),g(x),f(x)))
        [x, y, z, f(x), g(x), h(x)]
        >>> vsort((x,f(x),y,f(y)))
        [x, f(x), y, f(y)]
        >>> vsort((y,x,g(x),f(x),z,h(x),y,x))
        [x, y, f(x), g(x), z, h(x), x, y]
        >>> vsort((z,y,f(x),x,f(x),g(x)))
        [y, z, f(x), x, f(x), g(x)]
        >>> vsort((z,y,f(x),x,f(x),g(x),z,z,y,x))
        [y, z, f(x), x, f(x), g(x), x, y, z, z]
        """
        # Sort each maximal run of symbols (or of non-symbols) independently,
        # flushing the pending run whenever the kind switches, so that symbol
        # and non-symbol groups never cross each other.
        sorted_vars = []
        symbol_part = []
        non_symbol_part = []
        for v in vars:
            if not v.is_Symbol:
                if len(symbol_part) > 0:
                    sorted_vars.extend(sorted(symbol_part,key=default_sort_key))
                    symbol_part = []
                non_symbol_part.append(v)
            else:
                if len(non_symbol_part) > 0:
                    sorted_vars.extend(sorted(non_symbol_part,key=default_sort_key))
                    non_symbol_part = []
                symbol_part.append(v)
        # Flush whichever run is still pending.
        if len(non_symbol_part) > 0:
            sorted_vars.extend(sorted(non_symbol_part,key=default_sort_key))
        if len(symbol_part) > 0:
            sorted_vars.extend(sorted(symbol_part,key=default_sort_key))
        return sorted_vars
    def _eval_derivative(self, v):
        # If the variable v we are diff wrt is not in self.variables, we
        # assume that we might be able to take the derivative.
        if v not in self.variables:
            obj = self.expr.diff(v)
            if obj is S.Zero:
                return S.Zero
            if isinstance(obj, Derivative):
                # Merge the variable lists instead of nesting Derivatives.
                return Derivative(obj.expr, *(self.variables + obj.variables))
            # The derivative wrt v could have simplified things such that the
            # derivative wrt things in self.variables can now be done. Thus,
            # we set evaluate=True to see if there are any other derivatives
            # that can be done. The most common case is when obj is a simple
            # number so that the derivative wrt anything else will vanish.
            return Derivative(obj, *self.variables, **{'evaluate': True})
        # In this case v was in self.variables so the derivative wrt v has
        # already been attempted and was not computed, either because it
        # couldn't be or evaluate=False originally.
        return Derivative(self.expr, *(self.variables + (v, )), **{'evaluate': False})
def doit(self, **hints):
expr = self.expr
if hints.get('deep', True):
expr = expr.doit(**hints)
hints['evaluate'] = True
return Derivative(expr, *self.variables, **hints)
    @_sympifyit('z0', NotImplementedError)
    def doit_numerically(self, z0):
        """
        Evaluate the derivative at z numerically.
        When we can represent derivatives at a point, this should be folded
        into the normal evalf. For now, we need a special method.
        """
        from sympy import mpmath
        from sympy.core.expr import Expr
        # Only the single-variable, first-derivative case is supported.
        if len(self.free_symbols) != 1 or len(self.variables) != 1:
            raise NotImplementedError('partials and higher order derivatives')
        z = list(self.free_symbols)[0]
        def eval(x):
            # Substitute the numeric point, evaluate at mpmath's working
            # precision, and hand the result back to mpmath.
            f0 = self.expr.subs(z, Expr._from_mpmath(x, prec=mpmath.mp.prec))
            f0 = f0.evalf(mlib.libmpf.prec_to_dps(mpmath.mp.prec))
            return f0._to_mpmath(mpmath.mp.prec)
        return Expr._from_mpmath(mpmath.diff(eval, z0._to_mpmath(mpmath.mp.prec)),
            mpmath.mp.prec)
@property
def expr(self):
return self._args[0]
@property
def variables(self):
return self._args[1:]
@property
def free_symbols(self):
return self.expr.free_symbols
    def _eval_subs(self, old, new):
        # Exact match: replace the whole Derivative.
        if self==old:
            return new
        # Subs should only be used in situations where new is not a valid
        # variable for differentiating wrt. Previously, Subs was used for
        # anything that was not a Symbol, but that was too broad.
        if old in self.variables and not new._diff_wrt:
            # Issue 1620
            return Subs(self, old, new)
        # Otherwise substitute inside expression and variables alike.
        return Derivative(*map(lambda x: x._eval_subs(old, new), self.args))
def _eval_lseries(self, x):
dx = self.args[1:]
for term in self.args[0].lseries(x):
yield Derivative(term, *dx)
    def _eval_nseries(self, x, n, logx):
        # Expand the inner expression, then differentiate the series termwise.
        arg = self.args[0].nseries(x, n=n, logx=logx)
        o = arg.getO()
        dx = self.args[1:]
        rv = [Derivative(a, *dx) for a in Add.make_args(arg.removeO())]
        if o:
            # Carry the order term along, lowered by one power of x.
            rv.append(o/x)
        return Add(*rv)
def _eval_as_leading_term(self, x):
return self.args[0].as_leading_term(x)
class Lambda(Expr):
    """
    Lambda(x, expr) represents a lambda function similar to Python's
    'lambda x: expr'. A function of several variables is written as
    Lambda((x, y, ...), expr).
    A simple example:
    >>> from sympy import Lambda
    >>> from sympy.abc import x
    >>> f = Lambda(x, x**2)
    >>> f(4)
    16
    For multivariate functions, use:
    >>> from sympy.abc import y, z, t
    >>> f2 = Lambda((x, y, z, t), x + y**z + t**z)
    >>> f2(1, 2, 3, 4)
    73
    A handy shortcut for lots of arguments:
    >>> p = x, y, z
    >>> f = Lambda(p, x + y*z)
    >>> f(*p)
    x + y*z
    """
    is_Function = True
    __slots__ = []
    def __new__(cls, variables, expr):
        # Accept either a single variable or any iterable of variables.
        try:
            variables = Tuple(*variables)
        except TypeError:
            variables = Tuple(variables)
        if len(variables) == 1 and variables[0] == expr:
            return S.IdentityFunction
        #use dummy variables internally, just to be sure
        new_variables = [C.Dummy(arg.name) for arg in variables]
        expr = sympify(expr).xreplace(dict(zip(variables, new_variables)))
        obj = Expr.__new__(cls, Tuple(*new_variables), expr)
        return obj
    @property
    def variables(self):
        """The variables used in the internal representation of the function"""
        return self._args[0]
    @property
    def expr(self):
        """The return value of the function"""
        return self._args[1]
    @property
    def free_symbols(self):
        # Bound variables are not free.
        return self.expr.free_symbols - set(self.variables)
    @property
    def nargs(self):
        """The number of arguments that this function takes"""
        return len(self._args[0])
    def __call__(self, *args):
        if len(args) != self.nargs:
            raise TypeError("%s takes %d arguments (%d given)" % (self, self.nargs, len(args)))
        return self.expr.xreplace(dict(zip(self.variables, args)))
    def __eq__(self, other):
        # Two Lambdas are equal when their bodies agree after renaming the
        # other's bound variables to ours (alpha-equivalence).
        if not isinstance(other, Lambda):
            return False
        if self.nargs != other.nargs:
            return False
        selfexpr = self.args[1]
        otherexpr = other.args[1]
        otherexpr = otherexpr.xreplace(dict(zip(other.args[0], self.args[0])))
        return selfexpr == otherexpr
    def __ne__(self, other):
        return not(self == other)
    def __hash__(self):
        # __eq__ is overridden, so re-expose the superclass hash explicitly.
        return super(Lambda, self).__hash__()
    def _hashable_content(self):
        # Hash on arity and free symbols only, consistent with the
        # alpha-equivalence notion of equality above.
        return (self.nargs, ) + tuple(sorted(self.free_symbols))
    @property
    def is_identity(self):
        """Return ``True`` if this ``Lambda`` is an identity function. """
        if len(self.args) == 2:
            return self.args[0] == self.args[1]
        else:
            return None
class Subs(Expr):
"""
Represents unevaluated substitutions of an expression.
``Subs(expr, x, x0)`` receives 3 arguments: an expression, a variable or list
of distinct variables and a point or list of evaluation points
corresponding to those variables.
``Subs`` objects are generally useful to represent unevaluated derivatives
calculated at a point.
The variables may be expressions, but they are subjected to the limitations
of subs(), so it is usually a good practice to use only symbols for
variables, since in that case there can be no ambiguity.
There's no automatic expansion - use the method .doit() to effect all
possible substitutions of the object and also of objects inside the
expression.
When evaluating derivatives at a point that is not a symbol, a Subs object
is returned. One is also able to calculate derivatives of Subs objects - in
this case the expression is always expanded (for the unevaluated form, use
Derivative()).
A simple example:
>>> from sympy import Subs, Function, sin
>>> from sympy.abc import x, y, z
>>> f = Function('f')
>>> e = Subs(f(x).diff(x), x, y)
>>> e.subs(y, 0)
Subs(Derivative(f(_x), _x), (_x,), (0,))
>>> e.subs(f, sin).doit()
cos(y)
An example with several variables:
>>> Subs(f(x)*sin(y)+z, (x, y), (0, 1))
Subs(z + f(_x)*sin(_y), (_x, _y), (0, 1))
>>> _.doit()
z + f(0)*sin(1)
"""
def __new__(cls, expr, variables, point, **assumptions):
if not is_sequence(variables, Tuple):
variables = [variables]
variables = Tuple(*sympify(variables))
if uniq(variables) != variables:
repeated = repeated = [ v for v in set(variables)
if list(variables).count(v) > 1 ]
raise ValueError('cannot substitute expressions %s more than '
'once.' % repeated)
if not is_sequence(point, Tuple):
point = [point]
point = Tuple(*sympify(point))
if len(point) != len(variables):
raise ValueError('Number of point values must be the same as '
'the number of variables.')
# it's necessary to use dummy variables internally
new_variables = Tuple(*[ arg.as_dummy() if arg.is_Symbol else
C.Dummy(str(arg)) for arg in variables ])
expr = sympify(expr).subs(tuple(zip(variables, new_variables)))
if expr.is_commutative:
assumptions['commutative'] = True
obj = Expr.__new__(cls, expr, new_variables, point, **assumptions)
return obj
def doit(self):
return self.expr.doit().subs(zip(self.variables, self.point))
def evalf(self):
return self.doit().evalf()
    @property
    def variables(self):
        """The variables to be evaluated"""
        return self._args[1]
    @property
    def expr(self):
        """The expression on which the substitution operates"""
        return self._args[0]
    @property
    def point(self):
        """The values for which the variables are to be substituted"""
        return self._args[2]
@property
def free_symbols(self):
return (self.expr.free_symbols - set(self.variables) |
set(self.point.free_symbols))
    def __eq__(self, other):
        # Two Subs are equal when they substitute the same point values into
        # structurally matching expressions, regardless of the internal dummy
        # variable names.
        if not isinstance(other, Subs):
            return False
        if (len(self.point) != len(other.point) or
            self.free_symbols != other.free_symbols or
            sorted(self.point) != sorted(other.point)):
            return False
        # non-repeated point args
        selfargs = [ v[0] for v in sorted(zip(self.variables, self.point),
            key = lambda v: v[1]) if list(self.point.args).count(v[1]) == 1 ]
        otherargs = [ v[0] for v in sorted(zip(other.variables, other.point),
            key = lambda v: v[1]) if list(other.point.args).count(v[1]) == 1 ]
        # find repeated point values and subs each associated variable
        # for a single symbol
        selfrepargs = []
        otherrepargs = []
        if uniq(self.point) != self.point:
            repeated = uniq([ v for v in self.point if
                list(self.point.args).count(v) > 1 ])
            repswap = dict(zip(repeated, [ C.Dummy() for _ in
                xrange(len(repeated)) ]))
            selfrepargs = [ (self.variables[i], repswap[v]) for i, v in
                enumerate(self.point) if v in repeated ]
            otherrepargs = [ (other.variables[i], repswap[v]) for i, v in
                enumerate(other.point) if v in repeated ]
        # Rename the other's variables to ours and compare the expressions.
        return self.expr.subs(selfrepargs) == other.expr.subs(
            tuple(zip(otherargs, selfargs))).subs(otherrepargs)
def __ne__(self, other):
    """Negation of the structural equality defined in ``__eq__``."""
    return not (self == other)
def __hash__(self):
    # Delegate to the parent class hash, which is driven by
    # _hashable_content below so hashing stays consistent with __eq__.
    return super(Subs, self).__hash__()
def _hashable_content(self):
    # Hash only on the sorted points and free symbols (not the bound
    # variable names) so alpha-equivalent Subs objects hash alike.
    return tuple(sorted(self.point)) + tuple(sorted(self.free_symbols))
def _eval_subs(self, old, new):
    """Substitution hook: replace the whole node or push into its parts."""
    if self == old:
        return new
    new_expr = self.expr.subs(old, new)
    new_point = self.point.subs(old, new)
    return Subs(new_expr, self.variables, new_point)
def _eval_derivative(self, s):
    # d/ds Subs(f, x, p) = Subs(df/ds, x, p)                   (explicit part)
    #                    + sum_i Subs(dp_i/ds * df/dx_i, x, p)  (chain rule)
    if s not in self.free_symbols:
        return S.Zero
    return Subs(self.expr.diff(s), self.variables, self.point).doit() \
        + Add(*[ Subs(point.diff(s) * self.expr.diff(arg),
            self.variables, self.point).doit() for arg,
            point in zip(self.variables, self.point) ])
def diff(f, *symbols, **kwargs):
    """Differentiate ``f`` with respect to ``symbols``.

    This is a thin wrapper unifying ``.diff()`` and the ``Derivative``
    class; its interface mirrors ``integrate()``.  The same multi-variable
    shortcuts as ``Derivative`` apply, so ``diff(f(x), x, x, x)`` and
    ``diff(f(x), x, 3)`` both give the third derivative of ``f(x)``.

    Pass ``evaluate=False`` to obtain an unevaluated ``Derivative``.  With
    zero differentiation symbols (e.g. ``diff(f(x), x, 0)``) the function
    itself is returned, even when ``evaluate=False``.

    Examples::

        >>> from sympy import sin, cos, Function, diff
        >>> from sympy.abc import x, y
        >>> f = Function('f')

        >>> diff(sin(x), x)
        cos(x)
        >>> diff(f(x), x, 3)
        Derivative(f(x), x, x, x)
        >>> diff(sin(x)*cos(y), x, 2, y, 2)
        sin(x)*cos(y)

    ``diff(sin(x))`` (guessing the variable) is meant only for interactive
    convenience and should be avoided in library code.

    See the docstring of ``Derivative`` for full details.
    """
    if 'evaluate' not in kwargs:
        kwargs['evaluate'] = True
    return Derivative(f, *symbols, **kwargs)
def expand(e, deep=True, modulus=None, power_base=True, power_exp=True,
        mul=True, log=True, multinomial=True, basic=True, **hints):
    """Expand an expression using methods given as hints.

    Hints are applied in an arbitrary order, so code should not depend on
    the order in which they are passed.  Hints evaluated unless explicitly
    set to False are::

        basic, log, multinomial, mul, power_base, power_exp

    The following hints are supported but not applied unless set to True:
    ``complex``, ``func`` and ``trig``.

    ``basic`` is a generic keyword for methods that want to be expanded
    automatically (for example, ``Integral`` uses it on its integrand).
    When ``deep`` is True (the default), arguments of functions are
    expanded recursively; use ``deep=False`` to expand only the top
    level.  The ``force`` hint makes expansions ignore assumptions about
    the variables involved.

    Examples::

        >>> from sympy import cos, exp
        >>> from sympy.abc import x, y, z

        >>> (y*(x + z)).expand(mul=True)
        x*y + y*z
        >>> exp(x + y).expand(power_exp=True)
        exp(x)*exp(y)
        >>> ((x + y + z)**2).expand(multinomial=True)
        x**2 + 2*x*y + 2*x*z + y**2 + 2*y*z + z**2

    Hints can also be shut off::

        >>> (exp(x + y)*(x + y)).expand(power_exp=False)
        x*exp(x + y) + y*exp(x + y)

    Because hints are applied in arbitrary order, one hint may prevent
    expansion by another (e.g. ``mul`` may distribute multiplications and
    block ``log`` or ``power_base``); expand with ``mul=False`` first and
    then call ``expand_mul`` if finer control is needed.

    See also ``expand_log``, ``expand_mul``, ``separate``,
    ``expand_complex``, ``expand_trig`` and ``expand_func``, which are
    wrappers around the individual hints.
    """
    # Explicit keyword arguments take precedence over anything passed in
    # through **hints under the same name.
    explicit = dict(power_base=power_base, power_exp=power_exp, mul=mul,
        log=log, multinomial=multinomial, basic=basic)
    hints.update(explicit)
    return sympify(e).expand(deep=deep, modulus=modulus, **hints)
# These are simple wrappers around single hints. Feel free to add ones for
# power_exp, power_base, multinomial, or basic if you need them.
def expand_mul(expr, deep=True):
    """Wrapper around ``expand`` that applies only the ``mul`` hint.

    See the ``expand`` docstring for details.

    Example::

        >>> from sympy import symbols, expand_mul, exp, log
        >>> x, y = symbols('x,y', positive=True)
        >>> expand_mul(exp(x+y)*(x+y)*log(x*y**2))
        x*exp(x + y)*log(x*y**2) + y*exp(x + y)*log(x*y**2)
    """
    only_mul = dict(mul=True, power_exp=False, power_base=False,
        basic=False, multinomial=False, log=False)
    return sympify(expr).expand(deep=deep, **only_mul)
def expand_multinomial(expr, deep=True):
    """Wrapper around ``expand`` that applies only the ``multinomial`` hint.

    See the ``expand`` docstring for details.

    Example::

        >>> from sympy import symbols, expand_multinomial, exp
        >>> x, y = symbols('x y', positive=True)
        >>> expand_multinomial((x + exp(x + 1))**2)
        x**2 + 2*x*exp(x + 1) + exp(2*x + 2)
    """
    only_multinomial = dict(multinomial=True, mul=False, power_exp=False,
        power_base=False, basic=False, log=False)
    return sympify(expr).expand(deep=deep, **only_multinomial)
def expand_log(expr, deep=True):
    """Wrapper around ``expand`` that applies only the ``log`` hint.

    See the ``expand`` docstring for details.

    Example::

        >>> from sympy import symbols, expand_log, exp, log
        >>> x, y = symbols('x,y', positive=True)
        >>> expand_log(exp(x+y)*(x+y)*log(x*y**2))
        (x + y)*(log(x) + 2*log(y))*exp(x + y)
    """
    only_log = dict(log=True, mul=False, power_exp=False,
        power_base=False, multinomial=False, basic=False)
    return sympify(expr).expand(deep=deep, **only_log)
def expand_func(expr, deep=True):
    """Wrapper around ``expand`` that applies only the ``func`` hint.

    See the ``expand`` docstring for details.

    Example::

        >>> from sympy import expand_func, gamma
        >>> from sympy.abc import x
        >>> expand_func(gamma(x + 2))
        x*(x + 1)*gamma(x)
    """
    only_func = dict(func=True, basic=False, log=False, mul=False,
        power_exp=False, power_base=False, multinomial=False)
    return sympify(expr).expand(deep=deep, **only_func)
def expand_trig(expr, deep=True):
    """Wrapper around ``expand`` that applies only the ``trig`` hint.

    See the ``expand`` docstring for details.

    Example::

        >>> from sympy import expand_trig, sin, cos
        >>> from sympy.abc import x, y
        >>> expand_trig(sin(x+y)*(x+y))
        (x + y)*(sin(x)*cos(y) + sin(y)*cos(x))
    """
    only_trig = dict(trig=True, basic=False, log=False, mul=False,
        power_exp=False, power_base=False, multinomial=False)
    return sympify(expr).expand(deep=deep, **only_trig)
def expand_complex(expr, deep=True):
    """Wrapper around ``expand`` that applies only the ``complex`` hint.

    See the ``expand`` docstring for details.

    Example::

        >>> from sympy import expand_complex, I, im, re
        >>> from sympy.abc import z
        >>> expand_complex(z**(2*I))
        re(z**(2*I)) + I*im(z**(2*I))
    """
    only_complex = dict(complex=True, basic=False, log=False, mul=False,
        power_exp=False, power_base=False, multinomial=False)
    return sympify(expr).expand(deep=deep, **only_complex)
def count_ops(expr, visual=False):
    """
    Return a representation (integer or expression) of the operations in expr.

    If ``visual`` is ``False`` (default) then the sum of the coefficients of the
    visual expression will be returned.

    If ``visual`` is ``True`` then the number of each type of operation is shown
    with the core class types (or their virtual equivalent) multiplied by the
    number of times they occur.

    If expr is an iterable, the sum of the op counts of the
    items will be returned.

    Examples:
        >>> from sympy.abc import a, b, x, y
        >>> from sympy import sin, count_ops

    Although there isn't a SUB object, minus signs are interpreted as
    either negations or subtractions:

        >>> (x - y).count_ops(visual=True)
        SUB
        >>> (-x).count_ops(visual=True)
        NEG

    Here, there are two Adds and a Pow:

        >>> (1 + a + b**2).count_ops(visual=True)
        2*ADD + POW

    In the following, an Add, Mul, Pow and two functions:

        >>> (sin(x)*x + sin(x)**2).count_ops(visual=True)
        ADD + MUL + POW + 2*SIN

    for a total of 5:

        >>> (sin(x)*x + sin(x)**2).count_ops(visual=False)
        5

    Note that "what you type" is not always what you get. The expression
    1/x/y is translated by sympy into 1/(x*y) so it gives a DIV and MUL rather
    than two DIVs:

        >>> (1/x/y).count_ops(visual=True)
        DIV + MUL

    The visual option can be used to demonstrate the difference in
    operations for expressions in different forms. Here, the Horner
    representation is compared with the expanded form of a polynomial:

        >>> eq=x*(1 + x*(2 + x*(3 + x)))
        >>> count_ops(eq.expand(), visual=True) - count_ops(eq, visual=True)
        -MUL + 3*POW

    The count_ops function also handles iterables:

        >>> count_ops([x, sin(x), None, True, x + 2], visual=False)
        2
        >>> count_ops([x, sin(x), None, True, x + 2], visual=True)
        ADD + SIN
        >>> count_ops({x: sin(x), x + 2: y + 1}, visual=True)
        2*ADD + SIN
    """
    from sympy.simplify.simplify import fraction

    expr = sympify(expr)
    if isinstance(expr, Expr):
        # Iterative walk over the expression tree, collecting one symbolic
        # "op" (NEG, DIV, SUB, ADD, MUL, POW, function name, ...) per
        # visible operation.
        ops = []
        args = [expr]
        NEG = C.Symbol('NEG')
        DIV = C.Symbol('DIV')
        SUB = C.Symbol('SUB')
        ADD = C.Symbol('ADD')

        def isneg(a):
            # True when the leading numeric coefficient of ``a`` is negative.
            c = a.as_coeff_mul()[0]
            return c.is_Number and c.is_negative

        while args:
            a = args.pop()
            if a.is_Rational:
                #-1/3 = NEG + DIV
                if a is not S.One:
                    if a.p < 0:
                        ops.append(NEG)
                    if a.q != 1:
                        ops.append(DIV)
                    continue
            elif a.is_Mul:
                if isneg(a):
                    # Strip the leading minus sign and count it as NEG.
                    ops.append(NEG)
                    if a.args[0] is S.NegativeOne:
                        a = a.as_two_terms()[1]
                    else:
                        a = -a
                n, d = fraction(a)
                if n.is_Integer:
                    ops.append(DIV)
                    if n < 0:
                        ops.append(NEG)
                    args.append(d)
                    continue # won't be -Mul but could be Add
                elif d is not S.One:
                    if not d.is_Integer:
                        args.append(d)
                    ops.append(DIV)
                    args.append(n)
                    continue # could be -Mul
            elif a.is_Add:
                aargs = list(a.args)
                negs = 0
                for i, ai in enumerate(aargs):
                    if isneg(ai):
                        negs += 1
                        args.append(-ai)
                        if i > 0:
                            ops.append(SUB)
                    else:
                        args.append(ai)
                        if i > 0:
                            ops.append(ADD)
                if negs == len(aargs): # -x - y = NEG + SUB
                    ops.append(NEG)
                elif isneg(aargs[0]): # -x + y = SUB, but we already recorded an ADD
                    ops.append(SUB - ADD)
                continue
            if a.is_Pow and a.exp is S.NegativeOne:
                ops.append(DIV)
                args.append(a.base) # won't be -Mul but could be Add
                continue
            if (a.is_Mul or
                a.is_Pow or
                a.is_Function or
                isinstance(a, Derivative) or
                isinstance(a, C.Integral)):
                # Record the node's own operation under its class name.
                o = C.Symbol(a.func.__name__.upper())
                # count the args
                if (a.is_Mul or
                    isinstance(a, C.LatticeOp)):
                    ops.append(o*(len(a.args) - 1))
                else:
                    ops.append(o)
            args.extend(a.args)

    elif type(expr) is dict:
        # Count both keys and values of a dict.
        ops = [count_ops(k, visual=visual) +
               count_ops(v, visual=visual) for k, v in expr.iteritems()]
    elif iterable(expr):
        ops = [count_ops(i, visual=visual) for i in expr]
    elif not isinstance(expr, Basic):
        # Non-sympy, non-iterable objects contribute no operations.
        ops = []
    else: # it's Basic not isinstance(expr, Expr):
        assert isinstance(expr, Basic)
        ops = [count_ops(a, visual=visual) for a in expr.args]

    if not ops:
        if visual:
            return S.Zero
        return 0

    ops = Add(*ops)

    if visual:
        return ops

    if ops.is_Number:
        return int(ops)

    # Sum the integer coefficients of the visual expression.
    return sum(int((a.args or [1])[0]) for a in Add.make_args(ops))
from sympify import sympify
from add import Add
| 34.042982 | 104 | 0.568762 |
acde87931169750f67a089c6ded798033059532f | 1,395 | py | Python | bin/01_download_images.py | jkellers/traffic-cam | 37332c5fe654f00c1c79a42cc743c46716dc4daf | [
"MIT"
] | 5 | 2020-09-25T11:24:45.000Z | 2021-02-18T16:30:03.000Z | bin/01_download_images.py | jkellers/traffic-cam | 37332c5fe654f00c1c79a42cc743c46716dc4daf | [
"MIT"
] | 7 | 2020-09-25T14:12:21.000Z | 2020-09-26T17:20:50.000Z | bin/01_download_images.py | jkellers/traffic-cam | 37332c5fe654f00c1c79a42cc743c46716dc4daf | [
"MIT"
] | 3 | 2020-09-25T13:40:13.000Z | 2020-09-29T09:11:49.000Z | #!/usr/bin/env python3
""" Download a user-defined number of images from the live webcam and sort
them into per-class directories using the trained classifier. """
import argparse
import logging
import shutil
import time
from subprocess import CalledProcessError
from typing import Optional

from traffic_cam import io, paths, classifier

logging.basicConfig(level=logging.INFO)

# parse terminal args
parser = argparse.ArgumentParser(
    description="Download images from live webcam to file.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
    # BUG FIX: the short flag was registered as "-n " (trailing space),
    # which argparse can never match on the command line.
    "-n",
    "--n_images",
    help="number of images to download",
    type=int,
    default=15,
)
parser.add_argument(
    "-s",
    "--sleep",
    help="number of seconds to sleep between downloads",
    type=int,
    default=15,
)
args = parser.parse_args()
logging.info(f"args.n_images: {args.n_images}")
logging.info(f"args.sleep: {args.sleep}")

# prepare paths to write to
paths.create_paths_if_not_exist()
model = classifier.get_classifier_model()

# download and classify N images
for i in range(args.n_images):
    # download image
    logging.info(f"Download image {i + 1} of {args.n_images}...")
    image_path = io.download_image()
    if image_path is None:
        logging.info("No image downloaded.")
        continue
    # move the downloaded image into the directory of its predicted class
    # (the previously bound-but-unused return value is intentionally dropped)
    classifier.move_image_by_class(image_path, model)
    # sleep
    logging.info("Sleeping...")
    time.sleep(args.sleep)

logging.info("Finished.")
| 22.868852 | 65 | 0.713978 |
acde88666dfe2be80721d4c9846f33ce5c812bd6 | 1,306 | py | Python | python/thumbAutomation.py | nbkm8y5/FIU-SCIS-DAM-Python-Scripts | 3e441d04c2f269f0dde2c63aa2b5285c83325a5b | [
"MIT"
] | null | null | null | python/thumbAutomation.py | nbkm8y5/FIU-SCIS-DAM-Python-Scripts | 3e441d04c2f269f0dde2c63aa2b5285c83325a5b | [
"MIT"
] | 1 | 2017-02-15T16:12:44.000Z | 2017-03-06T18:20:44.000Z | python/thumbAutomation.py | nbkm8y5/FIU-SCIS-DAM-Python-Scripts | 3e441d04c2f269f0dde2c63aa2b5285c83325a5b | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
# NOTE: Python 2 script (uses statement-form print).
# Sets a thumbnail URL on every digital asset returned by the local DAM API.
#THIS IS READY TO GO
import requests
import os

# Fetch all digital assets from the local API.
get_url = "http://localhost:3000/api/v1/digitalassets"

headers = {
    'cache-control': "no-cache",
    'postman-token': "2d5ecaf6-fa57-fb91-de5b-53840ef16c41"
}

allImages = requests.request("GET", get_url, headers=headers)

for index, y in enumerate(allImages.json()['response']):
    # Debug output: asset index, URL and id (both unicode and ascii forms).
    print index
    print y['url'] #unicode
    print type(y['url']) #unicode
    print y['url'].encode('ascii', 'ignore') #string
    print type(y['url'].encode('ascii', 'ignore')) #string
    print y['_id']
    print type(y['_id']) #unicode
    print y['_id'].encode('ascii', 'ignore')
    print type(y['_id'].encode('ascii', 'ignore')) #string

    # PUT the derived thumbnail URL back onto this asset by id.
    url = "http://localhost:3000/api/v1/digitalassets/" + allImages.json()['response'][index]['_id'].encode('ascii', 'ignore')
    print os.path.basename(y['url'].encode('ascii', 'ignore'))
    # Thumbnail naming convention: "thumbnail-<original file basename>".
    payload = "{\"thumbnailUrl\": \"http://assets.cs.fiu.edu:3000/assets/thumbnails/thumbnail-" + os.path.basename(y['url'].encode('ascii', 'ignore')) + "\"}"
    headers = {
        'content-type': "application/json",
        'cache-control': "no-cache",
        'postman-token': "a96c6dd1-ace4-9d19-c145-1d488eb531a7"
    }
    response = requests.request("PUT", url, data=payload, headers=headers)
    print(response.text)
| 33.487179 | 158 | 0.647014 |
acde89b26612fc1af75872a9b95c4591126490c7 | 5,222 | py | Python | tests/pass_by_ref_test.py | AbstrLabs/pyteal | 2b2c1cfcffb2a759bfac3ff06d6abea65755ea43 | [
"MIT"
] | null | null | null | tests/pass_by_ref_test.py | AbstrLabs/pyteal | 2b2c1cfcffb2a759bfac3ff06d6abea65755ea43 | [
"MIT"
] | null | null | null | tests/pass_by_ref_test.py | AbstrLabs/pyteal | 2b2c1cfcffb2a759bfac3ff06d6abea65755ea43 | [
"MIT"
] | null | null | null | from pyteal import *
from .compile_asserts import assert_new_v_old, compile_and_save
# Set the following to True if you don't want to run the pass-by-ref
# dependent tests below.
OLD_CODE_ONLY = False
#### TESTS FOR PyTEAL THAT PREDATE PASS-BY-REF
@Subroutine(TealType.bytes)
def logcat(some_bytes, an_int):
    """Concatenate bytes with Itob(int), log the result, and return it."""
    combined = ScratchVar(TealType.bytes)
    store_step = combined.store(Concat(some_bytes, Itob(an_int)))
    return Seq(store_step, Log(combined.load()), combined.load())
def sub_logcat():
    """Assert that logcat produces the expected concatenation, then succeed."""
    check = logcat(Bytes("hello"), Int(42)) == Bytes("hello42")
    return Seq(Assert(check), Int(1))
@Subroutine(TealType.uint64)
def slow_fibonacci(n):
    """Naive doubly-recursive Fibonacci over TEAL uint64."""
    base_case = n <= Int(1)
    recursive_sum = slow_fibonacci(n - Int(2)) + slow_fibonacci(n - Int(1))
    return If(base_case).Then(n).Else(recursive_sum)
def sub_slowfib():
    # Exercise the recursive subroutine with a small constant input.
    return slow_fibonacci(Int(3))
@Subroutine(TealType.uint64)
def fast_fibonacci(n):
    # Iterative Fibonacci using two scratch accumulators instead of recursion.
    i = ScratchVar(TealType.uint64)
    a = ScratchVar(TealType.uint64)
    b = ScratchVar(TealType.uint64)
    return Seq(
        a.store(Int(0)),
        b.store(Int(1)),
        For(i.store(Int(1)), i.load() <= n, i.store(i.load() + Int(1))).Do(
            Seq(
                b.store(a.load() + b.load()),
                # recover the previous value of b (new b minus old a) into a
                a.store(b.load() - a.load()),
            )
        ),
        a.load(),
    )
def sub_fastfib():
    # Exercise the iterative subroutine with the same input as sub_slowfib.
    return fast_fibonacci(Int(3))
@Subroutine(TealType.uint64)
def recursiveIsEven(i):
    """Return 1 for even i and 0 for odd i, recursing in steps of two."""
    is_zero = i == Int(0)
    is_one = i == Int(1)
    recurse = recursiveIsEven(i - Int(2))
    return If(is_zero).Then(Int(1)).ElseIf(is_one).Then(Int(0)).Else(recurse)
def sub_even():
    """Evaluate recursiveIsEven on one even and one odd input."""
    even_call = Pop(recursiveIsEven(Int(1000)))
    odd_call = recursiveIsEven(Int(1001))
    return Seq(even_call, odd_call)
if not OLD_CODE_ONLY:
    #### TESTS FOR NEW PyTEAL THAT USES PASS-BY-REF / DYNAMIC

    # Append Itob(an_int) to the bytes held in ``first`` (mutated in place
    # via pass-by-reference) and log the result.
    @Subroutine(TealType.none)
    def logcat_dynamic(first: ScratchVar, an_int):
        return Seq(
            first.store(Concat(first.load(), Itob(an_int))),
            Log(first.load()),
        )

    def sub_logcat_dynamic():
        first = ScratchVar(TealType.bytes)
        return Seq(
            first.store(Bytes("hello")),
            logcat_dynamic(first, Int(42)),
            Assert(Bytes("hello42") == first.load()),
            Int(1),
        )

    # DynamicScratchVar demo: re-point player_score at three fixed-slot
    # ScratchVars and check both the stored value and the slot index.
    def wilt_the_stilt():
        player_score = DynamicScratchVar(TealType.uint64)

        wilt = ScratchVar(TealType.uint64, 129)
        kobe = ScratchVar(TealType.uint64)
        dt = ScratchVar(TealType.uint64, 131)

        return Seq(
            player_score.set_index(wilt),
            player_score.store(Int(100)),
            player_score.set_index(kobe),
            player_score.store(Int(81)),
            player_score.set_index(dt),
            player_score.store(Int(73)),
            Assert(player_score.load() == Int(73)),
            Assert(player_score.index() == Int(131)),
            player_score.set_index(wilt),
            Assert(player_score.load() == Int(100)),
            Assert(player_score.index() == Int(129)),
            Int(100),
        )

    # Swap the contents of two ScratchVars through a temporary.
    @Subroutine(TealType.none)
    def swap(x: ScratchVar, y: ScratchVar):
        z = ScratchVar(TealType.anytype)
        return Seq(
            z.store(x.load()),
            x.store(y.load()),
            y.store(z.load()),
        )

    # Concatenate two byte values and discard the result.
    @Subroutine(TealType.none)
    def cat(x, y):
        return Pop(Concat(x, y))

    def swapper():
        a = ScratchVar(TealType.bytes)
        b = ScratchVar(TealType.bytes)
        return Seq(
            a.store(Bytes("hello")),
            b.store(Bytes("goodbye")),
            cat(a.load(), b.load()),
            swap(a, b),
            Assert(a.load() == Bytes("goodbye")),
            Assert(b.load() == Bytes("hello")),
            Int(1000),
        )

    # In-place factorial: on return, n holds factorial of its input value.
    @Subroutine(TealType.none)
    def factorial(n: ScratchVar):
        tmp = ScratchVar(TealType.uint64)
        return (
            If(n.load() <= Int(1))
            .Then(n.store(Int(1)))
            .Else(
                Seq(
                    tmp.store(n.load() - Int(1)),
                    factorial(tmp),
                    n.store(n.load() * tmp.load()),
                )
            )
        )

    def fac_by_ref():
        n = ScratchVar(TealType.uint64)
        return Seq(
            n.store(Int(42)),
            factorial(n),
            n.load(),
        )

    # Mixed annotation styles: plain Expr params, a ScratchVar out-param,
    # and an explicit Expr return annotation.
    @Subroutine(TealType.uint64)
    def mixed_annotations(x: Expr, y: Expr, z: ScratchVar) -> Expr:
        return Seq(
            z.store(x),
            Log(Concat(y, Bytes("="), Itob(x))),
            x,
        )

    def sub_mixed():
        x = Int(42)
        y = Bytes("x")
        z = ScratchVar(TealType.uint64)
        return mixed_annotations(x, y, z)
# Cases that work on pre-pass-by-ref PyTeal.
OLD_CASES = (sub_logcat, sub_slowfib, sub_fastfib, sub_even)


def test_old():
    # Compile each case and compare against the saved golden output.
    for pt in OLD_CASES:
        assert_new_v_old(pt)
# Allow running the old-PyTeal cases directly as a script.
if __name__ == "__main__":
    test_old()
if not OLD_CODE_ONLY:
    # Cases that require pass-by-ref / DynamicScratchVar support.
    NEW_CASES = (
        sub_logcat_dynamic,
        swapper,
        wilt_the_stilt,
        fac_by_ref,
        sub_mixed,
    )

    def test_swapper():
        # Only compile-and-save (no golden comparison) for swapper.
        compile_and_save(swapper)

    def test_new():
        # Compile each case and compare against the saved golden output.
        for pt in NEW_CASES:
            assert_new_v_old(pt)

    if __name__ == "__main__":
        test_swapper()
        test_new()
| 24.401869 | 75 | 0.53447 |
acde89de73a0cebebb1168ff6aaa0821e174fac3 | 16,771 | py | Python | nemo_text_processing/text_normalization/normalize_with_audio.py | Oreoluwa1234/NeMo | b01e3ceed34efe31fd43866685dbdd19a6b30928 | [
"Apache-2.0"
] | null | null | null | nemo_text_processing/text_normalization/normalize_with_audio.py | Oreoluwa1234/NeMo | b01e3ceed34efe31fd43866685dbdd19a6b30928 | [
"Apache-2.0"
] | null | null | null | nemo_text_processing/text_normalization/normalize_with_audio.py | Oreoluwa1234/NeMo | b01e3ceed34efe31fd43866685dbdd19a6b30928 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
from argparse import ArgumentParser
from glob import glob
from typing import List, Tuple
from joblib import Parallel, delayed
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
# Optional dependencies: each flag records whether the corresponding
# package group imported successfully, so callers can degrade gracefully.
try:
    from nemo.collections.asr.metrics.wer import word_error_rate
    from nemo.collections.asr.models import ASRModel

    ASR_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
    ASR_AVAILABLE = False

try:
    import pynini
    from pynini.lib import rewrite

    PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
    PYNINI_AVAILABLE = False

try:
    from nemo.collections.nlp.data.text_normalization.utils import post_process_punct
    from nemo_text_processing.text_normalization.data_loader_utils import pre_process

    NLP_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
    NLP_AVAILABLE = False
"""
The script provides multiple normalization options and chooses the best one that minimizes CER of the ASR output
(most of the semiotic classes use deterministic=False flag).
To run this script with a .json manifest file, the manifest file should contain the following fields:
"audio_data" - path to the audio file
"text" - raw text
"pred_text" - ASR model prediction
See https://github.com/NVIDIA/NeMo/blob/main/examples/asr/transcribe_speech.py on how to add ASR predictions
When the manifest is ready, run:
python normalize_with_audio.py \
--audio_data PATH/TO/MANIFEST.JSON \
--language en
To run with a single audio file, specify path to audio and text with:
python normalize_with_audio.py \
--audio_data PATH/TO/AUDIO.WAV \
--language en \
--text raw text OR PATH/TO/.TXT/FILE
--model QuartzNet15x5Base-En \
--verbose
To see possible normalization options for a text input without an audio file (could be used for debugging), run:
python normalize_with_audio.py --text "RAW TEXT"
Specify `--cache_dir` to generate .far grammars once and re-used them for faster inference
"""
class NormalizerWithAudio(Normalizer):
    """
    Normalizer class that converts text from written to spoken form.
    Useful for TTS preprocessing.

    Args:
        input_case: expected input capitalization
        lang: language
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
    """

    def __init__(
        self,
        input_case: str,
        lang: str = 'en',
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
    ):
        # deterministic=False makes the taggers emit multiple candidate
        # normalizations instead of a single canonical one.
        super().__init__(
            input_case=input_case,
            lang=lang,
            deterministic=False,
            cache_dir=cache_dir,
            overwrite_cache=overwrite_cache,
            whitelist=whitelist,
        )

    def normalize(self, text: str, n_tagged: int, punct_post_process: bool = True, verbose: bool = False,) -> str:
        """
        Main function. Normalizes tokens from written to spoken form
        e.g. 12 kg -> twelve kilograms

        Args:
            text: string that may include semiotic classes
            n_tagged: number of tagged options to consider, -1 - to get all possible tagged options
            punct_post_process: whether to normalize punctuation
            verbose: whether to print intermediate meta information

        Returns:
            normalized text options (usually there are multiple ways of normalizing a given semiotic class)
            NOTE(review): despite the ``-> str`` annotation, this returns a
            set of candidate strings — confirm before tightening the type.
        """
        original_text = text
        if self.lang == "en":
            text = pre_process(text)

        text = text.strip()
        if not text:
            if verbose:
                print(text)
            return text
        # Escape pynini special characters before composing with the FSTs.
        text = pynini.escape(text)

        if n_tagged == -1:
            # All possible tagged options.
            if self.lang == "en":
                # Prefer the digit-free tagger; fall back to the full one
                # when rewriting fails.
                try:
                    tagged_texts = rewrite.rewrites(text, self.tagger.fst_no_digits)
                except pynini.lib.rewrite.Error:
                    tagged_texts = rewrite.rewrites(text, self.tagger.fst)
            else:
                tagged_texts = rewrite.rewrites(text, self.tagger.fst)
        else:
            # Only the n_tagged shortest tagged options.
            if self.lang == "en":
                try:
                    tagged_texts = rewrite.top_rewrites(text, self.tagger.fst_no_digits, nshortest=n_tagged)
                except pynini.lib.rewrite.Error:
                    tagged_texts = rewrite.top_rewrites(text, self.tagger.fst, nshortest=n_tagged)
            else:
                tagged_texts = rewrite.top_rewrites(text, self.tagger.fst, nshortest=n_tagged)

        # non-deterministic Eng normalization uses tagger composed with verbalizer, no permutation in between
        if self.lang == "en":
            normalized_texts = tagged_texts
        else:
            normalized_texts = []
            for tagged_text in tagged_texts:
                self._verbalize(tagged_text, normalized_texts, verbose=verbose)

        if len(normalized_texts) == 0:
            raise ValueError()

        if punct_post_process:
            # do post-processing based on Moses detokenizer
            if self.processor:
                normalized_texts = [self.processor.detokenize([t]) for t in normalized_texts]
                normalized_texts = [
                    post_process_punct(input=original_text, normalized_text=t) for t in normalized_texts
                ]

        # Deduplicate the candidate normalizations.
        normalized_texts = set(normalized_texts)
        return normalized_texts

    def _verbalize(self, tagged_text: str, normalized_texts: List[str], verbose: bool = False):
        """
        Verbalizes tagged text

        Args:
            tagged_text: text with tags
            normalized_texts: list of possible normalization options
                (extended in place)
            verbose: if true prints intermediate classification results
        """

        def get_verbalized_text(tagged_text):
            return rewrite.rewrites(tagged_text, self.verbalizer.fst)

        self.parser(tagged_text)
        tokens = self.parser.parse()
        # Token order can be permuted; try every ordering and keep the
        # ones the verbalizer accepts.
        tags_reordered = self.generate_permutations(tokens)
        for tagged_text_reordered in tags_reordered:
            try:
                tagged_text_reordered = pynini.escape(tagged_text_reordered)
                normalized_texts.extend(get_verbalized_text(tagged_text_reordered))
                if verbose:
                    print(tagged_text_reordered)
            except pynini.lib.rewrite.Error:
                # Orderings the verbalizer cannot rewrite are skipped.
                continue

    def select_best_match(
        self,
        normalized_texts: List[str],
        input_text: str,
        pred_text: str,
        verbose: bool = False,
        remove_punct: bool = False,
    ):
        """
        Selects the best normalization option based on the lowest CER

        Args:
            normalized_texts: normalized text options
            input_text: input text
            pred_text: ASR model transcript of the audio file corresponding to the normalized text
            verbose: whether to print intermediate meta information
            remove_punct: whether to remove punctuation before calculating CER

        Returns:
            normalized text with the lowest CER and CER value
        """
        # With no ASR transcript there is nothing to score against; return
        # the raw input with a sentinel CER of 1000.
        if pred_text == "":
            return input_text, 1000

        normalized_texts_cer = calculate_cer(normalized_texts, pred_text, remove_punct)
        normalized_texts_cer = sorted(normalized_texts_cer, key=lambda x: x[1])
        normalized_text, cer = normalized_texts_cer[0]

        if verbose:
            print('-' * 30)
            for option in normalized_texts:
                print(option)
            print('-' * 30)
        return normalized_text, cer
def calculate_cer(normalized_texts: List[str], pred_text: str, remove_punct=False) -> List[Tuple[str, float]]:
    """
    Score every normalization candidate against the ASR output.

    Args:
        normalized_texts: normalized text options
        pred_text: ASR model output
        remove_punct: whether to strip punctuation before scoring

    Returns: list of (candidate, CER-percentage) pairs
    """
    scored = []
    for candidate in normalized_texts:
        cleaned = candidate.replace('-', ' ').lower()
        if remove_punct:
            for punct in "!?:;,.-()*+-/<=>@^_":
                cleaned = cleaned.replace(punct, "")
        cer = round(word_error_rate([pred_text], [cleaned], use_cer=True) * 100, 2)
        scored.append((candidate, cer))
    return scored
def get_asr_model(asr_model):
    """
    Returns an ASR Model instance.

    Args:
        asr_model: pretrained NeMo model name, or path to a .nemo checkpoint

    Raises:
        ValueError: if ``asr_model`` is neither an existing file nor a known
            pretrained model name
    """
    # BUG FIX: this function previously consulted the global ``args.model``
    # instead of its ``asr_model`` parameter, so it only worked when run as
    # a script with that exact flag.
    if os.path.exists(asr_model):
        asr_model = ASRModel.restore_from(asr_model)
    elif asr_model in ASRModel.get_available_model_names():
        asr_model = ASRModel.from_pretrained(asr_model)
    else:
        raise ValueError(
            f'Provide path to the pretrained checkpoint or choose from {ASRModel.get_available_model_names()}'
        )
    return asr_model
def parse_args():
    """Build and parse the command-line arguments for this script."""
    parser = ArgumentParser()
    parser.add_argument("--text", help="input string or path to a .txt file", default=None, type=str)
    parser.add_argument(
        "--input_case", help="input capitalization", choices=["lower_cased", "cased"], default="cased", type=str
    )
    parser.add_argument(
        "--language", help="Select target language", choices=["en", "ru", "de"], default="en", type=str
    )
    # Either a single audio file or a .json manifest (with pred_text fields).
    parser.add_argument("--audio_data", default=None, help="path to an audio file or .json manifest")
    parser.add_argument(
        '--model', type=str, default='QuartzNet15x5Base-En', help='Pre-trained model name or path to model checkpoint'
    )
    parser.add_argument(
        "--n_tagged",
        type=int,
        default=30,
        help="number of tagged options to consider, -1 - return all possible tagged options",
    )
    parser.add_argument("--verbose", help="print info for debugging", action="store_true")
    parser.add_argument(
        "--no_remove_punct_for_cer",
        help="Set to True to NOT remove punctuation before calculating CER",
        action="store_true",
    )
    parser.add_argument(
        "--no_punct_post_process", help="set to True to disable punctuation post processing", action="store_true"
    )
    parser.add_argument("--overwrite_cache", help="set to True to re-create .far grammar files", action="store_true")
    parser.add_argument("--whitelist", help="path to a file with with whitelist", default=None, type=str)
    parser.add_argument(
        "--cache_dir",
        help="path to a dir with .far grammar file. Set to None to avoid using cache",
        default=None,
        type=str,
    )
    # Parallelism knobs for manifest processing (see normalize_manifest).
    parser.add_argument("--n_jobs", default=-2, type=int, help="The maximum number of concurrently running jobs")
    parser.add_argument("--batch_size", default=200, type=int, help="Number of examples for each process")
    return parser.parse_args()
def _normalize_line(normalizer: NormalizerWithAudio, n_tagged, verbose, line: str, remove_punct, punct_post_process):
    """Normalize one manifest entry and score it against its ASR transcript.

    ``line`` is a JSON-encoded manifest row containing "text" (reference) and
    "pred_text" (ASR transcript) fields. The row is returned with two extra
    keys: "nemo_normalized" (best normalization candidate) and
    "CER_nemo_normalized" (its character error rate versus the transcript).
    """
    entry = json.loads(line)
    reference = entry["text"]
    transcript = entry["pred_text"]

    # Generate up to n_tagged candidate normalizations for the reference text.
    candidates = normalizer.normalize(
        text=reference, verbose=verbose, n_tagged=n_tagged, punct_post_process=punct_post_process,
    )
    # Pick the candidate closest to the ASR transcript (lowest CER).
    best, cer = normalizer.select_best_match(
        normalized_texts=candidates,
        input_text=reference,
        pred_text=transcript,
        verbose=verbose,
        remove_punct=remove_punct,
    )

    entry["nemo_normalized"] = best
    entry["CER_nemo_normalized"] = cer
    return entry
def normalize_manifest(
    normalizer,
    audio_data: str,
    n_jobs: int,
    n_tagged: int,
    remove_punct: bool,
    punct_post_process: bool,
    batch_size: int,
):
    """
    Normalize every entry of a .json manifest and write the result next to it.

    Args:
        normalizer: NormalizerWithAudio instance used for normalization.
        audio_data: path to .json manifest file; each line must be a JSON
            object with "text" and "pred_text" fields.
        n_jobs: maximum number of concurrently running joblib workers.
        n_tagged: number of tagged normalization options to consider.
        remove_punct: whether to strip punctuation before computing CER.
        punct_post_process: whether to apply punctuation post-processing.
        batch_size: number of manifest lines handled per worker batch.

    Side effects:
        Creates a "<manifest>_normalized_parts/" directory holding one .json
        file per batch, then concatenates them into
        "<manifest>_normalized.json".
    """

    def __process_batch(batch_idx, batch, dir_name):
        # Normalize one batch serially and persist it immediately so partial
        # progress survives if a later batch (or the whole run) fails.
        normalized_lines = [
            _normalize_line(
                normalizer,
                n_tagged,
                verbose=False,
                line=line,
                remove_punct=remove_punct,
                punct_post_process=punct_post_process,
            )
            for line in tqdm(batch)
        ]

        with open(f"{dir_name}/{batch_idx}.json", "w") as f_out:
            for line in normalized_lines:
                f_out.write(json.dumps(line, ensure_ascii=False) + '\n')
        print(f"Batch -- {batch_idx} -- is complete")
        return normalized_lines

    manifest_out = audio_data.replace('.json', '_normalized.json')
    with open(audio_data, 'r') as f:
        lines = f.readlines()

    print(f'Normalizing {len(lines)} lines of {audio_data}...')

    # to save intermediate results to a file
    batch = min(len(lines), batch_size)  # effective batch size, capped by input length
    tmp_dir = manifest_out.replace(".json", "_parts")
    os.makedirs(tmp_dir, exist_ok=True)

    Parallel(n_jobs=n_jobs)(
        delayed(__process_batch)(idx, lines[i : i + batch], tmp_dir)
        for idx, i in enumerate(range(0, len(lines), batch))
    )

    # aggregate all intermediate files
    # NOTE(review): sorted() on filenames is lexicographic ("10.json" sorts
    # before "2.json"), so the aggregated manifest may not preserve the
    # original line order -- confirm whether downstream consumers rely on it.
    with open(manifest_out, "w") as f_out:
        for batch_f in sorted(glob(f"{tmp_dir}/*.json")):
            with open(batch_f, "r") as f_in:
                lines = f_in.read()
            f_out.write(lines)

    print(f'Normalized version saved at {manifest_out}')
if __name__ == "__main__":
    args = parse_args()
    # Audio-based ranking requires the NeMo ASR collection for transcription.
    if not ASR_AVAILABLE and args.audio_data:
        raise ValueError("NeMo ASR collection is not installed.")

    start = time.time()
    args.whitelist = os.path.abspath(args.whitelist) if args.whitelist else None

    # NOTE(review): if the user supplies neither --text nor --audio_data, the
    # first `elif` below evaluates os.path.exists(None) and raises TypeError
    # instead of the intended usage ValueError -- confirm and guard.
    if args.text is not None:
        # Mode 1: normalize a single string, optionally ranked against audio.
        normalizer = NormalizerWithAudio(
            input_case=args.input_case,
            lang=args.language,
            cache_dir=args.cache_dir,
            overwrite_cache=args.overwrite_cache,
            whitelist=args.whitelist,
        )
        # --text may be a path to a .txt file; if so, read its contents.
        if os.path.exists(args.text):
            with open(args.text, 'r') as f:
                args.text = f.read().strip()
        normalized_texts = normalizer.normalize(
            text=args.text,
            verbose=args.verbose,
            n_tagged=args.n_tagged,
            punct_post_process=not args.no_punct_post_process,
        )
        if args.audio_data:
            # Transcribe the audio and pick the normalization option closest
            # to the ASR transcript (lowest CER).
            asr_model = get_asr_model(args.model)
            pred_text = asr_model.transcribe([args.audio_data])[0]
            normalized_text, cer = normalizer.select_best_match(
                normalized_texts=normalized_texts,
                pred_text=pred_text,
                input_text=args.text,
                verbose=args.verbose,
                remove_punct=not args.no_remove_punct_for_cer,
            )
            print(f"Transcript: {pred_text}")
            print(f"Normalized: {normalized_text}")
        else:
            # No audio: print every candidate normalization.
            print("Normalization options:")
            for norm_text in normalized_texts:
                print(norm_text)
    elif not os.path.exists(args.audio_data):
        raise ValueError(f"{args.audio_data} not found.")
    elif args.audio_data.endswith('.json'):
        # Mode 2: batch-normalize a .json manifest.
        normalizer = NormalizerWithAudio(
            input_case=args.input_case,
            lang=args.language,
            cache_dir=args.cache_dir,
            overwrite_cache=args.overwrite_cache,
            whitelist=args.whitelist,
        )
        normalize_manifest(
            normalizer=normalizer,
            audio_data=args.audio_data,
            n_jobs=args.n_jobs,
            n_tagged=args.n_tagged,
            remove_punct=not args.no_remove_punct_for_cer,
            punct_post_process=not args.no_punct_post_process,
            batch_size=args.batch_size,
        )
    else:
        raise ValueError(
            "Provide either path to .json manifest in '--audio_data' OR "
            + "'--audio_data' path to audio file and '--text' path to a text file OR"
            "'--text' string text (for debugging without audio)"
        )
    print(f'Execution time: {round((time.time() - start)/60, 2)} min.')
| 35.83547 | 118 | 0.636933 |
acde89f9fc62f9e8b017d12b540f1a6f04347d49 | 1,882 | py | Python | opensanctions/crawlers/kz_afmrk_sanctions.py | opensanctions/opensanctions | 7dff9597f982d8918699b2cde3c7c337a941622d | [
"MIT"
] | 23 | 2022-02-09T12:50:36.000Z | 2022-03-30T16:04:19.000Z | opensanctions/crawlers/kz_afmrk_sanctions.py | opensanctions/opennames | 39675797b0e70e71f54edff2b8e623e23aef9c15 | [
"MIT"
] | 10 | 2022-02-03T08:44:03.000Z | 2022-03-21T15:27:40.000Z | opensanctions/crawlers/kz_afmrk_sanctions.py | opensanctions/opennames | 39675797b0e70e71f54edff2b8e623e23aef9c15 | [
"MIT"
] | 2 | 2022-02-16T11:51:05.000Z | 2022-03-02T16:55:08.000Z | from pantomime.types import XML
from opensanctions.core import Context
from opensanctions import helpers as h
FORMATS = ["%d.%m.%Y"]
def make_entity(context: Context, el, schema, entity_id):
    """Build a targeted entity of the given schema plus its sanction record.

    The sanction is emitted immediately; the entity itself is returned so the
    caller can decorate it (names, identifiers, dates) before emitting.
    """
    proxy = context.make(schema, target=True)
    proxy.id = entity_id
    proxy.add("notes", h.clean_note(el.findtext("./note")))
    proxy.add("topics", "sanction")

    sanction = h.make_sanction(context, proxy)
    sanction.add("summary", el.findtext("./correction"))
    context.emit(sanction)
    return proxy
def crawl(context: Context):
    """Crawl the Kazakh AFM sanctions XML, emitting Person and Organization entities.

    Fetches the dataset XML, registers it as a source resource, then walks
    every <person> and <org> node in the document.
    """
    path = context.fetch_resource("source.xml", context.dataset.data.url)
    context.export_resource(path, XML, title=context.SOURCE_TITLE)
    doc = context.parse_resource_xml(path)
    for el in doc.findall(".//person"):
        fname = el.findtext("./fname")
        mname = el.findtext("./mname")
        lname = el.findtext("./lname")
        # NOTE(review): `name` is assigned but never used below --
        # h.apply_name re-derives it from the same parts. Candidate for removal.
        name = h.make_name(given_name=fname, middle_name=mname, last_name=lname)
        # The source record number plus the name parts make the stable ID.
        entity_id = context.make_id(el.findtext("./num"), fname, mname, lname)
        entity = make_entity(context, el, "Person", entity_id)
        h.apply_name(entity, given_name=fname, middle_name=mname, last_name=lname)
        entity.add("idNumber", el.findtext("./iin"))
        bdate = el.findtext("./birthdate")
        # Raw string passed as third argument -- presumably a fallback for
        # unparseable dates; verify against h.parse_date's signature.
        entity.add("birthDate", h.parse_date(bdate, FORMATS, bdate))
        context.emit(entity, target=True)

    for el in doc.findall(".//org"):
        name = el.findtext(".//org_name")
        entity_id = context.make_id(el.findtext("./num"), name)
        entity = make_entity(context, el, "Organization", entity_id)
        for tag in (".//org_name", ".//org_name_en"):
            names = el.findtext(tag)
            if names is None:
                continue
            # Multiple aliases are semicolon-separated in the source data.
            names = names.split("; ")
            entity.add("name", names)
        context.emit(entity, target=True)
acde8a067f9f9599475eb0e08ef94059e64198e0 | 8,400 | py | Python | docs/conf.py | idekerlab/cddiffusion | 3fac6c10217d3dcc1d6c3dc809d94ae9186f00e3 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | idekerlab/cddiffusion | 3fac6c10217d3dcc1d6c3dc809d94ae9186f00e3 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | idekerlab/cddiffusion | 3fac6c10217d3dcc1d6c3dc809d94ae9186f00e3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# cddiffusion documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import cddiffusion
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Network Heat Diffusion'
copyright = u"2021, UCSD"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = cddiffusion.__version__
# The full version, including alpha/beta/rc tags.
release = cddiffusion.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# Fixed: was the stale 'cdlouvaindoc' left over from the conf.py this file
# was copied from; every other output target here uses 'cddiffusion'.
htmlhelp_basename = 'cddiffusiondoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'cddiffusion.tex',
u'Network Heat Diffusion',
u'TBD', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cddiffusion',
u'Network Heat Diffusion',
[u'TBD'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cddiffusion',
u'Network Heat Diffusion',
u'TBD',
'cddiffusion',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.434783 | 76 | 0.715357 |
acde8a6b4d62311dfe34214846a5fceb08cac02c | 17,372 | py | Python | sdk/lusid/models/a2_b_movement_record.py | mneedham/lusid-sdk-python-preview | f4494009d1a2f3431d931c813cab679bdbd92c84 | [
"MIT"
] | null | null | null | sdk/lusid/models/a2_b_movement_record.py | mneedham/lusid-sdk-python-preview | f4494009d1a2f3431d931c813cab679bdbd92c84 | [
"MIT"
] | null | null | null | sdk/lusid/models/a2_b_movement_record.py | mneedham/lusid-sdk-python-preview | f4494009d1a2f3431d931c813cab679bdbd92c84 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3192
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class A2BMovementRecord(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Represents a single movement row in an A2B report; see `group_id` for
    cross-referencing entries between the A2B report and A2B-Movements.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Python attribute name -> OpenAPI type string (used by to_dict and
    # the SDK's (de)serialization machinery).
    openapi_types = {
        'holding_type': 'str',
        'instrument_uid': 'str',
        'sub_holding_keys': 'dict(str, PerpetualProperty)',
        'currency': 'str',
        'transaction_id': 'str',
        'movement_name': 'str',
        'effective_date': 'datetime',
        'units': 'float',
        'start': 'A2BCategory',
        'flows': 'A2BCategory',
        'gains': 'A2BCategory',
        'carry': 'A2BCategory',
        'end': 'A2BCategory',
        'properties': 'dict(str, ModelProperty)',
        'group_id': 'str'
    }

    # Python attribute name -> JSON field name used on the wire.
    attribute_map = {
        'holding_type': 'holdingType',
        'instrument_uid': 'instrumentUid',
        'sub_holding_keys': 'subHoldingKeys',
        'currency': 'currency',
        'transaction_id': 'transactionId',
        'movement_name': 'movementName',
        'effective_date': 'effectiveDate',
        'units': 'units',
        'start': 'start',
        'flows': 'flows',
        'gains': 'gains',
        'carry': 'carry',
        'end': 'end',
        'properties': 'properties',
        'group_id': 'groupId'
    }

    # Every field of this model is optional.
    required_map = {
        'holding_type': 'optional',
        'instrument_uid': 'optional',
        'sub_holding_keys': 'optional',
        'currency': 'optional',
        'transaction_id': 'optional',
        'movement_name': 'optional',
        'effective_date': 'optional',
        'units': 'optional',
        'start': 'optional',
        'flows': 'optional',
        'gains': 'optional',
        'carry': 'optional',
        'end': 'optional',
        'properties': 'optional',
        'group_id': 'optional'
    }
def __init__(self, holding_type=None, instrument_uid=None, sub_holding_keys=None, currency=None, transaction_id=None, movement_name=None, effective_date=None, units=None, start=None, flows=None, gains=None, carry=None, end=None, properties=None, group_id=None): # noqa: E501
"""
A2BMovementRecord - a model defined in OpenAPI
:param holding_type: The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc.
:type holding_type: str
:param instrument_uid: The unique Lusid Instrument Id (LUID) of the instrument that the holding is in.
:type instrument_uid: str
:param sub_holding_keys: The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created.
:type sub_holding_keys: dict[str, lusid.PerpetualProperty]
:param currency: The holding currency.
:type currency: str
:param transaction_id: The unique identifier for the transaction.
:type transaction_id: str
:param movement_name: The name of the movement.
:type movement_name: str
:param effective_date: The date of the movement.
:type effective_date: datetime
:param units: The number of units of the instrument that are affected by the movement.
:type units: float
:param start:
:type start: lusid.A2BCategory
:param flows:
:type flows: lusid.A2BCategory
:param gains:
:type gains: lusid.A2BCategory
:param carry:
:type carry: lusid.A2BCategory
:param end:
:type end: lusid.A2BCategory
:param properties: The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' domain.
:type properties: dict[str, lusid.ModelProperty]
:param group_id: Arbitrary string that can be used to cross reference an entry in the A2B report with activity in the A2B-Movements. This should be used purely as a token. The content should not be relied upon.
:type group_id: str
""" # noqa: E501
self._holding_type = None
self._instrument_uid = None
self._sub_holding_keys = None
self._currency = None
self._transaction_id = None
self._movement_name = None
self._effective_date = None
self._units = None
self._start = None
self._flows = None
self._gains = None
self._carry = None
self._end = None
self._properties = None
self._group_id = None
self.discriminator = None
self.holding_type = holding_type
self.instrument_uid = instrument_uid
self.sub_holding_keys = sub_holding_keys
self.currency = currency
self.transaction_id = transaction_id
self.movement_name = movement_name
if effective_date is not None:
self.effective_date = effective_date
if units is not None:
self.units = units
if start is not None:
self.start = start
if flows is not None:
self.flows = flows
if gains is not None:
self.gains = gains
if carry is not None:
self.carry = carry
if end is not None:
self.end = end
self.properties = properties
self.group_id = group_id
@property
def holding_type(self):
"""Gets the holding_type of this A2BMovementRecord. # noqa: E501
The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. # noqa: E501
:return: The holding_type of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._holding_type
@holding_type.setter
def holding_type(self, holding_type):
"""Sets the holding_type of this A2BMovementRecord.
The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. # noqa: E501
:param holding_type: The holding_type of this A2BMovementRecord. # noqa: E501
:type: str
"""
self._holding_type = holding_type
@property
def instrument_uid(self):
"""Gets the instrument_uid of this A2BMovementRecord. # noqa: E501
The unique Lusid Instrument Id (LUID) of the instrument that the holding is in. # noqa: E501
:return: The instrument_uid of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._instrument_uid
@instrument_uid.setter
def instrument_uid(self, instrument_uid):
"""Sets the instrument_uid of this A2BMovementRecord.
The unique Lusid Instrument Id (LUID) of the instrument that the holding is in. # noqa: E501
:param instrument_uid: The instrument_uid of this A2BMovementRecord. # noqa: E501
:type: str
"""
self._instrument_uid = instrument_uid
@property
def sub_holding_keys(self):
"""Gets the sub_holding_keys of this A2BMovementRecord. # noqa: E501
The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. # noqa: E501
:return: The sub_holding_keys of this A2BMovementRecord. # noqa: E501
:rtype: dict(str, PerpetualProperty)
"""
return self._sub_holding_keys
@sub_holding_keys.setter
def sub_holding_keys(self, sub_holding_keys):
"""Sets the sub_holding_keys of this A2BMovementRecord.
The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. # noqa: E501
:param sub_holding_keys: The sub_holding_keys of this A2BMovementRecord. # noqa: E501
:type: dict(str, PerpetualProperty)
"""
self._sub_holding_keys = sub_holding_keys
@property
def currency(self):
"""Gets the currency of this A2BMovementRecord. # noqa: E501
The holding currency. # noqa: E501
:return: The currency of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this A2BMovementRecord.
The holding currency. # noqa: E501
:param currency: The currency of this A2BMovementRecord. # noqa: E501
:type: str
"""
self._currency = currency
@property
def transaction_id(self):
"""Gets the transaction_id of this A2BMovementRecord. # noqa: E501
The unique identifier for the transaction. # noqa: E501
:return: The transaction_id of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._transaction_id
@transaction_id.setter
def transaction_id(self, transaction_id):
"""Sets the transaction_id of this A2BMovementRecord.
The unique identifier for the transaction. # noqa: E501
:param transaction_id: The transaction_id of this A2BMovementRecord. # noqa: E501
:type: str
"""
self._transaction_id = transaction_id
@property
def movement_name(self):
"""Gets the movement_name of this A2BMovementRecord. # noqa: E501
The name of the movement. # noqa: E501
:return: The movement_name of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._movement_name
@movement_name.setter
def movement_name(self, movement_name):
"""Sets the movement_name of this A2BMovementRecord.
The name of the movement. # noqa: E501
:param movement_name: The movement_name of this A2BMovementRecord. # noqa: E501
:type: str
"""
self._movement_name = movement_name
@property
def effective_date(self):
"""Gets the effective_date of this A2BMovementRecord. # noqa: E501
The date of the movement. # noqa: E501
:return: The effective_date of this A2BMovementRecord. # noqa: E501
:rtype: datetime
"""
return self._effective_date
@effective_date.setter
def effective_date(self, effective_date):
"""Sets the effective_date of this A2BMovementRecord.
The date of the movement. # noqa: E501
:param effective_date: The effective_date of this A2BMovementRecord. # noqa: E501
:type: datetime
"""
self._effective_date = effective_date
@property
def units(self):
"""Gets the units of this A2BMovementRecord. # noqa: E501
The number of units of the instrument that are affected by the movement. # noqa: E501
:return: The units of this A2BMovementRecord. # noqa: E501
:rtype: float
"""
return self._units
@units.setter
def units(self, units):
"""Sets the units of this A2BMovementRecord.
The number of units of the instrument that are affected by the movement. # noqa: E501
:param units: The units of this A2BMovementRecord. # noqa: E501
:type: float
"""
self._units = units
@property
def start(self):
"""Gets the start of this A2BMovementRecord. # noqa: E501
:return: The start of this A2BMovementRecord. # noqa: E501
:rtype: A2BCategory
"""
return self._start
@start.setter
def start(self, start):
"""Sets the start of this A2BMovementRecord.
:param start: The start of this A2BMovementRecord. # noqa: E501
:type: A2BCategory
"""
self._start = start
@property
def flows(self):
"""Gets the flows of this A2BMovementRecord. # noqa: E501
:return: The flows of this A2BMovementRecord. # noqa: E501
:rtype: A2BCategory
"""
return self._flows
@flows.setter
def flows(self, flows):
"""Sets the flows of this A2BMovementRecord.
:param flows: The flows of this A2BMovementRecord. # noqa: E501
:type: A2BCategory
"""
self._flows = flows
@property
def gains(self):
"""Gets the gains of this A2BMovementRecord. # noqa: E501
:return: The gains of this A2BMovementRecord. # noqa: E501
:rtype: A2BCategory
"""
return self._gains
@gains.setter
def gains(self, gains):
"""Sets the gains of this A2BMovementRecord.
:param gains: The gains of this A2BMovementRecord. # noqa: E501
:type: A2BCategory
"""
self._gains = gains
@property
def carry(self):
"""Gets the carry of this A2BMovementRecord. # noqa: E501
:return: The carry of this A2BMovementRecord. # noqa: E501
:rtype: A2BCategory
"""
return self._carry
@carry.setter
def carry(self, carry):
"""Sets the carry of this A2BMovementRecord.
:param carry: The carry of this A2BMovementRecord. # noqa: E501
:type: A2BCategory
"""
self._carry = carry
@property
def end(self):
"""Gets the end of this A2BMovementRecord. # noqa: E501
:return: The end of this A2BMovementRecord. # noqa: E501
:rtype: A2BCategory
"""
return self._end
@end.setter
def end(self, end):
"""Sets the end of this A2BMovementRecord.
:param end: The end of this A2BMovementRecord. # noqa: E501
:type: A2BCategory
"""
self._end = end
@property
def properties(self):
"""Gets the properties of this A2BMovementRecord. # noqa: E501
The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' domain. # noqa: E501
:return: The properties of this A2BMovementRecord. # noqa: E501
:rtype: dict(str, ModelProperty)
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this A2BMovementRecord.
The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' domain. # noqa: E501
:param properties: The properties of this A2BMovementRecord. # noqa: E501
:type: dict(str, ModelProperty)
"""
self._properties = properties
@property
def group_id(self):
"""Gets the group_id of this A2BMovementRecord. # noqa: E501
Arbitrary string that can be used to cross reference an entry in the A2B report with activity in the A2B-Movements. This should be used purely as a token. The content should not be relied upon. # noqa: E501
:return: The group_id of this A2BMovementRecord. # noqa: E501
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this A2BMovementRecord.
Arbitrary string that can be used to cross reference an entry in the A2B report with activity in the A2B-Movements. This should be used purely as a token. The content should not be relied upon. # noqa: E501
:param group_id: The group_id of this A2BMovementRecord. # noqa: E501
:type: str
"""
self._group_id = group_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
    """Returns the string representation of the model"""
    # Delegates to to_dict() so nested models are rendered recursively.
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint`"""
    # repr is the pretty-printed dict form -- convenient for debugging payloads.
    return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, A2BMovementRecord):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.992634 | 279 | 0.618697 |
acde8ae61ba0eb3f6e8c7f050f9127600447ecb1 | 630 | py | Python | Gyandaan2/appointments/migrations/0021_remove_appointment_schedule_id_and_more.py | siddanthshetty000/Gyandaan | c65e3e021b84535dc86257e1c0d8152fdeda2269 | [
"Apache-2.0"
] | null | null | null | Gyandaan2/appointments/migrations/0021_remove_appointment_schedule_id_and_more.py | siddanthshetty000/Gyandaan | c65e3e021b84535dc86257e1c0d8152fdeda2269 | [
"Apache-2.0"
] | null | null | null | Gyandaan2/appointments/migrations/0021_remove_appointment_schedule_id_and_more.py | siddanthshetty000/Gyandaan | c65e3e021b84535dc86257e1c0d8152fdeda2269 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 4.0.1 on 2022-01-26 08:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('appointments', '0020_service_user'),
]
operations = [
migrations.RemoveField(
model_name='appointment',
name='schedule_id',
),
migrations.AddField(
model_name='appointment',
name='service_id',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='appointments.service'),
),
]
| 26.25 | 120 | 0.6 |
acde8bc344c9a8eef49c3f88c69a41ef2806e14b | 13,618 | py | Python | tensorflow_federated/python/learning/algorithms/fed_sgd.py | teo-milea/federated | ce0707a954a531860eb38864b44d7b748fd62aa7 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/learning/algorithms/fed_sgd.py | teo-milea/federated | ce0707a954a531860eb38864b44d7b748fd62aa7 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/learning/algorithms/fed_sgd.py | teo-milea/federated | ce0707a954a531860eb38864b44d7b748fd62aa7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
# This modules disables the Pytype analyzer, see
# https://github.com/tensorflow/federated/blob/main/docs/pytype.md for more
# information.
"""An implementation of the Federated SGD algorithm.
This is the baseline algorithm from:
Communication-Efficient Learning of Deep Networks from Decentralized Data
H. Brendan McMahan, Eider Moore, Daniel Ramage,
Seth Hampson, Blaise Aguera y Arcas. AISTATS 2017.
https://arxiv.org/abs/1602.05629
"""
import collections
from typing import Callable, Optional, Union
import tensorflow as tf
from tensorflow_federated.python.aggregators import factory
from tensorflow_federated.python.aggregators import mean
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_base
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.templates import measured_process
from tensorflow_federated.python.learning import model as model_lib
from tensorflow_federated.python.learning import model_utils
from tensorflow_federated.python.learning.framework import dataset_reduce
from tensorflow_federated.python.learning.metrics import aggregator as metric_aggregator
from tensorflow_federated.python.learning.optimizers import optimizer as optimizer_base
from tensorflow_federated.python.learning.templates import client_works
from tensorflow_federated.python.learning.templates import composers
from tensorflow_federated.python.learning.templates import distributors
from tensorflow_federated.python.learning.templates import finalizers
from tensorflow_federated.python.learning.templates import learning_process
from tensorflow_federated.python.tensorflow_libs import tensor_utils
def _build_client_update(model: model_lib.Model,
                         use_experimental_simulation_loop: bool = False):
  """Creates client update logic for FedSGD.

  Args:
    model: A `tff.learning.Model` used to compute gradients.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation.

  Returns:
    A `tf.function`.
  """
  dataset_reduce_fn = dataset_reduce.build_dataset_reduce_fn(
      use_experimental_simulation_loop)

  @tf.function
  def client_update(initial_weights, dataset):
    # Load the server-provided weights into the model's variables before
    # any gradients are computed.
    model_weights = model_utils.ModelWeights.from_model(model)
    tf.nest.map_structure(lambda a, b: a.assign(b), model_weights,
                          initial_weights)

    def reduce_fn(state, batch):
      """Runs forward_pass on batch and sums the weighted gradients."""
      accumulated_gradients, num_examples_sum = state
      with tf.GradientTape() as tape:
        output = model.forward_pass(batch)
      gradients = tape.gradient(output.loss, model_weights.trainable)
      num_examples = tf.cast(output.num_examples, tf.float32)
      # Weight each batch gradient by its example count so the division at the
      # end yields a per-example average over the full client dataset.
      accumulated_gradients = tuple(
          accumulator + num_examples * gradient
          for accumulator, gradient in zip(accumulated_gradients, gradients))

      # We may be able to optimize the reduce function to avoid doubling the
      # number of required variables here (e.g. keeping two copies of all
      # gradients). If you're looking to optimize memory usage this might be a
      # place to look.
      return (accumulated_gradients, num_examples_sum + num_examples)

    def _zero_initial_state():
      """Create a tuple of (gradient accumulators, num examples)."""
      return tuple(
          tf.nest.map_structure(tf.zeros_like,
                                model_weights.trainable)), tf.constant(
                                    0, dtype=tf.float32)

    gradient_sums, num_examples_sum = dataset_reduce_fn(
        reduce_fn=reduce_fn,
        dataset=dataset,
        initial_state_fn=_zero_initial_state)

    # We now normalize to compute the average gradient over all examples.
    average_gradient = tf.nest.map_structure(
        lambda gradient: gradient / num_examples_sum, gradient_sums)

    model_output = model.report_local_unfinalized_metrics()
    # Zero out the update and its weight if any non-finite value appeared, so
    # a single bad client cannot poison the server aggregate.
    average_gradient, has_non_finite_delta = (
        tensor_utils.zero_all_if_any_non_finite(average_gradient))
    if has_non_finite_delta > 0:
      client_weight = tf.constant(0.0)
    else:
      client_weight = num_examples_sum

    return client_works.ClientResult(
        update=average_gradient, update_weight=client_weight), model_output

  return client_update
def _build_fed_sgd_client_work(
    model_fn: Callable[[], model_lib.Model],
    metrics_aggregator: Callable[[
        model_lib.MetricFinalizersType, computation_types.StructWithPythonType
    ], computation_base.Computation],
    use_experimental_simulation_loop: bool = False
) -> client_works.ClientWorkProcess:
  """Creates a `tff.learning.templates.ClientWorkProcess` for federated SGD.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. This method
      must *not* capture TensorFlow tensors or variables and use them. The model
      must be constructed entirely from scratch on each invocation, returning
      the same pre-constructed model each call will result in an error.
    metrics_aggregator: A function that takes in the metric finalizers (i.e.,
      `tff.learning.Model.metric_finalizers()`) and a
      `tff.types.StructWithPythonType` of the unfinalized metrics (i.e., the TFF
      type of `tff.learning.Model.report_local_unfinalized_metrics()`), and
      returns a `tff.Computation` for aggregating the unfinalized metrics.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation. It is
      currently necessary to set this flag to True for performant GPU
      simulations.

  Returns:
    A `tff.learning.templates.ClientWorkProcess`.
  """
  with tf.Graph().as_default():
    # Wrap model construction in a graph to avoid polluting the global context
    # with variables created for this model.
    model = model_fn()
    unfinalized_metrics_type = type_conversions.type_from_tensors(
        model.report_local_unfinalized_metrics())
    metrics_aggregation_fn = metrics_aggregator(model.metric_finalizers(),
                                                unfinalized_metrics_type)
  data_type = computation_types.SequenceType(model.input_spec)
  weights_type = model_utils.weights_type_from_model(model)

  # FedSGD keeps no client-work state: init produces an empty server value.
  @computations.federated_computation
  def init_fn():
    return intrinsics.federated_value((), placements.SERVER)

  @computations.tf_computation(weights_type, data_type)
  def client_update_computation(initial_model_weights, dataset):
    client_update = _build_client_update(model_fn(),
                                         use_experimental_simulation_loop)
    return client_update(initial_model_weights, dataset)

  @computations.federated_computation(
      init_fn.type_signature.result, computation_types.at_clients(weights_type),
      computation_types.at_clients(data_type))
  def next_fn(state, model_weights, client_data):
    client_result, model_outputs = intrinsics.federated_map(
        client_update_computation, (model_weights, client_data))
    # Aggregate per-client unfinalized metrics into server-side measurements.
    train_metrics = metrics_aggregation_fn(model_outputs)
    measurements = intrinsics.federated_zip(
        collections.OrderedDict(train=train_metrics))
    return measured_process.MeasuredProcessOutput(state, client_result,
                                                  measurements)

  return client_works.ClientWorkProcess(init_fn, next_fn)
# Default server optimizer: plain Keras SGD with learning rate 0.1. Used when
# `build_fed_sgd` callers do not supply their own `server_optimizer_fn`.
DEFAULT_SERVER_OPTIMIZER_FN = lambda: tf.keras.optimizers.SGD(learning_rate=0.1)
def build_fed_sgd(
    model_fn: Callable[[], model_lib.Model],
    server_optimizer_fn: Union[optimizer_base.Optimizer, Callable[
        [], tf.keras.optimizers.Optimizer]] = DEFAULT_SERVER_OPTIMIZER_FN,
    model_distributor: Optional[distributors.DistributionProcess] = None,
    model_aggregator: Optional[factory.WeightedAggregationFactory] = None,
    metrics_aggregator: Optional[Callable[[
        model_lib.MetricFinalizersType, computation_types.StructWithPythonType
    ], computation_base.Computation]] = None,
    use_experimental_simulation_loop: bool = False,
) -> learning_process.LearningProcess:
  """Builds a `tff.learning.templates.LearningProcess` for federated SGD.

  This is the original FedSGD algorithm of
  [McMahan et al., 2017](https://arxiv.org/abs/1602.05629): on each round the
  server model is broadcast to clients, each client computes the average
  gradient over its local dataset (without taking local optimizer steps), the
  example-weighted gradients are aggregated at the server, and the server
  optimizer applies the aggregate as an update.

  The returned process exposes `initialize`, `next` and `get_model_weights`
  with the signatures described by `tff.learning.templates.LearningProcess`:
  `initialize` produces the initial `LearningAlgorithmState` at the server,
  `next` consumes that state plus client datasets and returns the updated
  state together with training/broadcast/aggregation metrics, and
  `get_model_weights` extracts the model weights from a state.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`. The model
      must be constructed entirely from scratch on each invocation; the
      callable must *not* capture and reuse TensorFlow tensors or variables.
    server_optimizer_fn: A `tff.learning.optimizers.Optimizer`, or a no-arg
      callable returning a `tf.keras.Optimizer`, used to apply the aggregated
      client update to the server model.
    model_distributor: An optional `DistributionProcess` used to broadcast
      server weights to clients. Defaults to a plain broadcast process.
    model_aggregator: An optional `tff.aggregators.WeightedAggregationFactory`
      for combining client updates. Defaults to `tff.aggregators.MeanFactory`.
    metrics_aggregator: An optional function mapping a model's metric
      finalizers and the unfinalized-metrics TFF type to a `tff.Computation`
      that aggregates them. Defaults to sum-then-finalize.
    use_experimental_simulation_loop: Controls the reduce loop function for
      input dataset. An experimental reduce loop is used for simulation.

  Returns:
    A `tff.learning.templates.LearningProcess`.
  """
  # Validate before tracing: the tf_computation below invokes `model_fn`.
  py_typecheck.check_callable(model_fn)

  @computations.tf_computation()
  def _initial_weights_fn():
    return model_utils.ModelWeights.from_model(model_fn())

  weights_type = _initial_weights_fn.type_signature.result

  # Resolve every pluggable piece the caller left unset to its default.
  distributor = (
      model_distributor if model_distributor is not None else
      distributors.build_broadcast_process(weights_type))
  weighted_factory = (
      model_aggregator if model_aggregator is not None else mean.MeanFactory())
  metrics_agg = (
      metrics_aggregator
      if metrics_aggregator is not None else metric_aggregator.sum_then_finalize)

  update_aggregator = weighted_factory.create(
      weights_type.trainable, computation_types.TensorType(tf.float32))
  client_work = _build_fed_sgd_client_work(
      model_fn,
      metrics_agg,
      use_experimental_simulation_loop=use_experimental_simulation_loop)
  finalizer = finalizers.build_apply_optimizer_finalizer(
      server_optimizer_fn, weights_type)
  return composers.compose_learning_process(_initial_weights_fn, distributor,
                                            client_work, update_aggregator,
                                            finalizer)
| 47.449477 | 88 | 0.754296 |
acde8cb358e96441f9002d7f2c25927583d779a9 | 29,212 | py | Python | tests/test_glob.py | gregplaysguitar/wcmatch | 3e1507dd1369e1841f733453966d5acda2b5dc08 | [
"MIT"
] | null | null | null | tests/test_glob.py | gregplaysguitar/wcmatch | 3e1507dd1369e1841f733453966d5acda2b5dc08 | [
"MIT"
] | null | null | null | tests/test_glob.py | gregplaysguitar/wcmatch | 3e1507dd1369e1841f733453966d5acda2b5dc08 | [
"MIT"
] | null | null | null | """
These test cases are taken straight from cpython to ensure our glob works as good as the builtin.
Matches close to the normal glob implementation, but there are a few consciously
made difference in implementation.
1. Our glob will often return things like `.` and `..` matching Bash's behavior.
2. We do not normalize out `.` and `..`, so the norm function below just joins.
3. We escape with backslashes not `[]`.
4. A Window's path separator will be two backslashes in a pattern due to escape logic, not one.
"""
import contextlib
from wcmatch import glob
from wcmatch import util
import types
import pytest
import os
import shutil
import sys
import unittest
import warnings
# Below is general helper stuff that Python uses in unittests. As these are
# not meant for users and could change without notice, include them ourselves
# so we aren't surprised later.
TESTFN = '@test'

# Disambiguate TESTFN for parallel testing, while letting it remain a valid
# module name.
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
@contextlib.contextmanager
def change_cwd(path, quiet=False):
    """Temporarily make *path* the current working directory.

    Yields the directory that is current inside the ``with`` block and always
    restores the previous working directory on exit.  If changing fails and
    *quiet* is true, a ``RuntimeWarning`` is issued and the CWD stays
    unchanged; otherwise the ``OSError`` propagates to the caller.
    """
    original = os.getcwd()
    try:
        os.chdir(path)
    except OSError:
        if not quiet:
            raise
        warnings.warn(
            'tests may fail, unable to change CWD to: ' + path,
            RuntimeWarning, stacklevel=3)
    try:
        yield os.getcwd()
    finally:
        os.chdir(original)
def create_empty_file(filename):
    """Create *filename* as an empty file, truncating any existing content."""
    # Opening for write creates the file if missing and truncates otherwise.
    with open(filename, 'wb'):
        pass
# Cached answer to "can this process create symlinks?"; None means unknown.
_can_symlink = None


def can_symlink():
    """Return True if the test process may create symlinks (result cached)."""
    global _can_symlink
    if _can_symlink is None:
        probe = TESTFN + "can_symlink"
        try:
            os.symlink(TESTFN, probe)
        except (OSError, NotImplementedError, AttributeError):
            # Unsupported platform or insufficient privileges (e.g. Windows).
            _can_symlink = False
        else:
            os.remove(probe)
            _can_symlink = True
    return _can_symlink
def skip_unless_symlink(test):
    """Skip decorator for tests that require functional symlink."""
    if can_symlink():
        return test
    return unittest.skip("Requires functional symlink implementation")(test)
class Options():
    """Keyword-argument bag used to toggle test-case behavior mid-list."""

    def __init__(self, **kwargs):
        """Record the given keyword arguments as the option mapping."""
        self._options = dict(kwargs)

    def get(self, key, default=None):
        """Return the value stored for *key*, or *default* when unset."""
        return self._options.get(key, default)
class _TestGlob:
    """
    Test glob.

    Each list entry in `cases` is run through `glob`, then the pattern and results are converted
    to bytes and run through `glob` again. Results are checked against the provided result list.

    There are a couple special types that can be inserted in the case list that can alter
    the behavior of the cases that follow.

    * Strings: These will be printed and then the next case will be processed.
    * Options: This object takes keyword parameters that are used to alter the next tests' options:
        * absolute: When joining path parts, do not prepend the tempdir.
        * skip: Skip tests when this is enabled.
        * cwd_temp: Switch the current working directory to the temp directory instead of having to prepend
            the temp directory to patterns and results.

    Each test case entry (list) is an array of up to 3 parameters (2 minimum).

    * Pattern: a list of path parts that are to be joined with the current OS separator.
    * Expected result (filenames matched by the pattern): a list of sublists where each sublist contains
        path parts that are to be joined with the current OS separator.
        Each path represents a full file path to match.
    * Flags

    The default flags are: GLOBSTAR | EXTGLOB | BRACE. If any of these flags are provided in
    a test case, they will disable the default of the same name. All other flags will enable flags as expected.
    """

    DEFAULT_FLAGS = glob.BRACE | glob.EXTGLOB | glob.GLOBSTAR

    # Subclasses override this with their case table.
    cases = []

    @classmethod
    def norm(cls, *parts):
        """Normalizes file path (in relation to temp dir)."""
        return os.path.join(cls.tempdir, *parts)

    @classmethod
    def globjoin(cls, *parts):
        """Joins glob path."""
        sep = cls.globsep
        return sep.join(list(parts))

    @classmethod
    def mktemp(cls, *parts):
        """Make temp directory."""
        filename = cls.norm(*parts)
        base, file = os.path.split(filename)
        if not os.path.exists(base):
            # Retry a few times to paper over transient filesystem races.
            retry = 3
            while retry:
                try:
                    os.makedirs(base)
                    retry = 0
                except Exception:
                    retry -= 1
        create_empty_file(filename)

    @classmethod
    def setup_class(cls):
        """Setup."""
        cls.absolute = False
        cls.skip = False
        cls.cwd_temp = False
        cls.just_negative = False
        # On Windows the glob separator in patterns is an escaped backslash.
        if os.sep == '/':
            cls.globsep = os.sep
        else:
            cls.globsep = r'\\'
        cls.tempdir = TESTFN + "_dir"
        cls.setup_fs()

    @classmethod
    def setup_fs(cls):
        """Setup filesystem."""

    @classmethod
    def teardown_class(cls):
        """Cleanup."""
        retry = 3
        while retry:
            try:
                shutil.rmtree(cls.tempdir)
                retry = 0
            except Exception:
                retry -= 1

    @staticmethod
    def assert_equal(a, b):
        """Assert equal."""
        assert a == b, "Comparison between objects yielded false."

    @staticmethod
    def assert_count_equal(a, b):
        """Assert count equal."""
        c1 = len(list(a)) if isinstance(a, types.GeneratorType) else len(a)
        c2 = len(list(b)) if isinstance(b, types.GeneratorType) else len(b)
        assert c1 == c2, "Length of %d does not equal %d" % (c1, c2)

    @classmethod
    def glob(cls, *parts, **kwargs):
        """Perform a glob with validation."""
        if parts:
            if len(parts) == 1:
                p = parts[0]
            else:
                p = cls.globjoin(*parts)
            if not cls.absolute:
                p = cls.globjoin(cls.tempdir, p)
        else:
            p = cls.tempdir
        res = glob.glob(p, **kwargs)
        print("RESULTS: ", res)
        if res:
            cls.assert_equal({type(r) for r in res}, {str})
        # `iglob` must yield the same number of entries as `glob`, and the
        # bytes-pattern variants must mirror the str results.
        cls.assert_count_equal(glob.iglob(p, **kwargs), res)
        bres = [os.fsencode(x) for x in res]
        cls.assert_count_equal(glob.glob(os.fsencode(p), **kwargs), bres)
        cls.assert_count_equal(glob.iglob(os.fsencode(p), **kwargs), bres)
        if bres:
            cls.assert_equal({type(r) for r in bres}, {bytes})
        return res

    @classmethod
    def nglob(cls, *parts, **kwargs):
        """Perform a glob with validation."""
        if parts:
            if len(parts) == 1:
                p = parts[0]
            else:
                p = cls.globjoin(*parts)
            if not cls.absolute:
                p = cls.globjoin(cls.tempdir, p)
        else:
            p = cls.tempdir
        # Negative patterns start with '!'; unless `just_negative` is set they
        # must be paired with an inclusion pattern ('**') to match against.
        p = '!' + p
        if not cls.just_negative:
            if not cls.absolute:
                p = [cls.globjoin(cls.tempdir, '**'), p]
            else:
                p = ['**', p]
        else:
            p = [p]
        res = glob.glob(p, **kwargs)
        print("RESULTS: ", res)
        if res:
            cls.assert_equal({type(r) for r in res}, {str})
        cls.assert_count_equal(glob.iglob(p, **kwargs), res)
        bres = [os.fsencode(x) for x in res]
        cls.assert_count_equal(glob.glob([os.fsencode(x) for x in p], **kwargs), bres)
        cls.assert_count_equal(glob.iglob([os.fsencode(x) for x in p], **kwargs), bres)
        if bres:
            cls.assert_equal({type(r) for r in bres}, {bytes})
        return res

    @classmethod
    def assertSequencesEqual_noorder(cls, l1, l2):
        """Verify lists match (unordered)."""
        l1 = list(l1)
        l2 = list(l2)
        cls.assert_equal(set(l1), set(l2))
        cls.assert_equal(sorted(l1), sorted(l2))

    @classmethod
    def eval_glob_cases(cls, case):
        """Eval glob cases."""
        eq = cls.assertSequencesEqual_noorder

        # for case in self.cases:
        # An `Options` entry mutates class-level switches for later cases and
        # is itself reported as a skipped test.
        if isinstance(case, Options):
            absolute = case.get('absolute')
            if absolute is not None:
                cls.absolute = absolute
            skip = case.get('skip')
            if skip is not None:
                cls.skip = skip
            cwd_temp = case.get('cwd_temp')
            if cwd_temp is not None:
                cls.cwd_temp = cwd_temp
            just_negative = case.get('just_negative')
            if just_negative is not None:
                cls.just_negative = just_negative
            pytest.skip("Change Options")

        if cls.skip:
            pytest.skip("Skipped")

        pattern = case[0]
        if not cls.absolute:
            results = [cls.norm(*x) for x in case[1]] if case[1] is not None else None
        else:
            results = [os.path.join(*list(x)) for x in case[1]] if case[1] is not None else None
        flags = cls.DEFAULT_FLAGS
        # XOR: flags listed in the case toggle the defaults off, others on.
        if len(case) > 2:
            flags ^= case[2]
        negative = flags & glob.N

        print("PATTERN: ", pattern)
        print("FLAGS: ", bin(flags))
        print("NEGATIVE: ", bin(negative))
        print("EXPECTED: ", results)
        if cls.cwd_temp:
            with change_cwd(cls.tempdir):
                res = cls.nglob(*pattern, flags=flags) if negative else cls.glob(*pattern, flags=flags)
        else:
            res = cls.nglob(*pattern, flags=flags) if negative else cls.glob(*pattern, flags=flags)

        if results is not None:
            eq(res, results)
        print('\n')
class Testglob(_TestGlob):
    """
    Test glob.

    See `_TestGlob` class for more information in regards to test case format.
    """

    # NOTE: `Options(...)` entries below toggle class-level switches for the
    # cases that follow them; expected results often branch on `can_symlink()`
    # because the symlink fixtures only exist where symlinks are supported.
    cases = [
        # Test literal.
        [('a',), [('a',)]],
        [('a', 'D'), [('a', 'D')]],
        [('aab',), [('aab',)]],
        [('zymurgy',), []],
        Options(absolute=True),
        [['*'], None],
        [[os.curdir, '*'], None],
        Options(absolute=False),

        # Glob one directory
        [('a*',), [('a',), ('aab',), ('aaa',)]],
        [('*a',), [('a',), ('aaa',)]],
        [('.*',), [('.',), ('..',), ('.aa',), ('.bb',)]],
        [('?aa',), [('aaa',)]],
        [('aa?',), [('aaa',), ('aab',)]],
        [('aa[ab]',), [('aaa',), ('aab',)]],
        [('*q',), []],

        # Glob inverse
        [
            ('a*',),
            [
                ('EF',), ('ZZZ',), ('',)
            ] if not can_symlink() else [
                ('EF',), ('ZZZ',), ('',), ('sym1',), ('sym3',), ('sym2',),
                ('sym3', 'efg'), ('sym3', 'efg', 'ha'), ('sym3', 'EF')
            ],
            glob.N
        ],

        # Test nested glob directory
        [
            ('a', 'bcd', 'E*'),
            [('a', 'bcd', 'EF')] if util.is_case_sensitive() else [('a', 'bcd', 'EF'), ('a', 'bcd', 'efg')]
        ],
        [('a', 'bcd', '*g'), [('a', 'bcd', 'efg')]],

        # Test glob directory names.
        [('*', 'D'), [('a', 'D')]],
        [('*', '*a'), []],
        [('a', '*', '*', '*a'), [('a', 'bcd', 'efg', 'ha')]],
        [('?a?', '*F'), [('aaa', 'zzzF'), ('aab', 'F')]],

        # Test glob magic in drive name.
        Options(absolute=True, skip=sys.platform != "win32"),
        [('*:',), []],
        [('?:',), []],
        [(r'\\\\?\\c:\\',), [('\\\\?\\c:\\',)]],
        [(r'\\\\*\\*\\',), []],
        Options(absolute=False, skip=False),

        Options(skip=not can_symlink()),
        # Test broken symlinks
        [('sym*',), [('sym1',), ('sym2',), ('sym3',)]],
        [('sym1',), [('sym1',)]],
        [('sym2',), [('sym2',)]],

        # Test glob symlinks.,
        [('sym3',), [('sym3',)]],
        [('sym3', '*'), [('sym3', 'EF'), ('sym3', 'efg')]],
        [('sym3', ''), [('sym3', '')]],
        [('*', '*F'), [('aaa', 'zzzF'), ('aab', 'F'), ('sym3', 'EF')]],
        Options(skip=False),

        # Test only directories
        [
            ('*', ''),
            [
                ('aab', ''), ('aaa', ''), ('a', '')
            ] if not can_symlink() else [
                ('aab', ''), ('aaa', ''), ('a', ''), ('sym3', '')
            ]
        ],
        Options(skip=util.is_case_sensitive()),
        [
            ('*\\',),
            [
                ('aab', ''), ('aaa', ''), ('a', '')
            ] if not can_symlink() else [
                ('aab', ''), ('aaa', ''), ('a', ''), ('sym3', '')
            ]
        ],
        Options(skip=False),

        # Test extglob.
        [('@(a|aa*(a|b))',), [('aab',), ('aaa',), ('a',)]],

        # Test sequences.
        [('[a]',), [('a',)]],
        [('[!b]',), [('a',)]],
        [('[^b]',), [('a',)]],
        [(r'@([\a]|\aaa)',), [('a',), ('aaa',)]],

        Options(absolute=True),
        # Test empty.
        [('',), []],
        Options(absolute=False),

        # Patterns ending with a slash shouldn't match non-dirs.
        [('Z*Z', ''), []],
        [('ZZZ', ''), []],
        [('aa*', ''), [('aaa', ''), ('aab', '')]],

        # Test recurision.
        [
            ('**',),
            [
                ('',),
                ('EF',), ('ZZZ',),
                ('a',), ('a', 'D'),
                ('a', 'bcd'),
                ('a', 'bcd', 'EF'),
                ('a', 'bcd', 'efg'),
                ('a', 'bcd', 'efg', 'ha'),
                ('aaa',), ('aaa', 'zzzF'),
                ('aab',), ('aab', 'F')
            ] if not can_symlink() else [
                ('',),
                ('EF',), ('ZZZ',),
                ('a',), ('a', 'D'),
                ('a', 'bcd'),
                ('a', 'bcd', 'EF'),
                ('a', 'bcd', 'efg'),
                ('a', 'bcd', 'efg', 'ha'),
                ('aaa',), ('aaa', 'zzzF'),
                ('aab',), ('aab', 'F'),
                ('sym1',), ('sym2',),
                ('sym3',),
                ('sym3', 'EF'),
                ('sym3', 'efg'),
                ('sym3', 'efg', 'ha')
            ]
        ],
        [
            ('**', '**'),
            [
                ('',),
                ('EF',), ('ZZZ',),
                ('a',), ('a', 'D'),
                ('a', 'bcd'),
                ('a', 'bcd', 'EF'),
                ('a', 'bcd', 'efg'),
                ('a', 'bcd', 'efg', 'ha'),
                ('aaa',), ('aaa', 'zzzF'),
                ('aab',), ('aab', 'F'),
            ] if not can_symlink() else [
                ('',),
                ('EF',), ('ZZZ',),
                ('a',), ('a', 'D'),
                ('a', 'bcd'),
                ('a', 'bcd', 'EF'),
                ('a', 'bcd', 'efg'),
                ('a', 'bcd', 'efg', 'ha'),
                ('aaa',), ('aaa', 'zzzF'),
                ('aab',), ('aab', 'F'),
                ('sym1',), ('sym2',),
                ('sym3',),
                ('sym3', 'EF'),
                ('sym3', 'efg'),
                ('sym3', 'efg', 'ha')
            ]
        ],
        [
            ('.', '**'),
            [
                ('.', ''),
                ('.', 'EF'), ('.', 'ZZZ'),
                ('.', 'a'), ('.', 'a', 'D'),
                ('.', 'a', 'bcd'),
                ('.', 'a', 'bcd', 'EF'),
                ('.', 'a', 'bcd', 'efg'),
                ('.', 'a', 'bcd', 'efg', 'ha'),
                ('.', 'aaa'), ('.', 'aaa', 'zzzF'),
                ('.', 'aab'), ('.', 'aab', 'F'),
            ] if not can_symlink() else [
                ('.', ''),
                ('.', 'EF'), ('.', 'ZZZ'),
                ('.', 'a'), ('.', 'a', 'D'),
                ('.', 'a', 'bcd'),
                ('.', 'a', 'bcd', 'EF'),
                ('.', 'a', 'bcd', 'efg'),
                ('.', 'a', 'bcd', 'efg', 'ha'),
                ('.', 'aaa'), ('.', 'aaa', 'zzzF'),
                ('.', 'aab'), ('.', 'aab', 'F'),
                ('.', 'sym1'), ('.', 'sym2'),
                ('.', 'sym3'),
                ('.', 'sym3', 'EF'),
                ('.', 'sym3', 'efg'),
                ('.', 'sym3', 'efg', 'ha')
            ]
        ],
        [
            ('**', ''),
            # Dirs
            [
                ('',),
                ('a', ''), ('a', 'bcd', ''), ('a', 'bcd', 'efg', ''),
                ('aaa', ''), ('aab', '')
            ] if not can_symlink() else [
                ('',),
                ('a', ''), ('a', 'bcd', ''), ('a', 'bcd', 'efg', ''),
                ('aaa', ''), ('aab', ''),
                ('sym3', ''), ('sym3', 'efg', '')
            ]
        ],
        [
            ('a', '**'),
            [('a', ''), ('a', 'D'), ('a', 'bcd'), ('a', 'bcd', 'EF'), ('a', 'bcd', 'efg'), ('a', 'bcd', 'efg', 'ha')]
        ],
        [('a**',), [('a',), ('aaa',), ('aab',)]],
        [
            ('**', 'EF'),
            [('a', 'bcd', 'EF'), ('EF',)] if not can_symlink() else [('a', 'bcd', 'EF'), ('EF',), ('sym3', 'EF')]
        ],
        [
            ('**', '*F'),
            [
                ('a', 'bcd', 'EF'), ('aaa', 'zzzF'), ('aab', 'F'), ('EF',)
            ] if not can_symlink() else [
                ('a', 'bcd', 'EF'), ('aaa', 'zzzF'), ('aab', 'F'), ('EF',), ('sym3', 'EF')
            ]
        ],
        [('**', '*F', ''), []],
        [('**', 'bcd', '*'), [('a', 'bcd', 'EF'), ('a', 'bcd', 'efg')]],
        [('a', '**', 'bcd'), [('a', 'bcd')]],

        # With `cwd_temp` the tempdir is the CWD, so patterns and results are
        # relative instead of being prefixed with the tempdir.
        Options(cwd_temp=True, absolute=True),
        [
            ('**',),
            [
                ('EF',), ('ZZZ',),
                ('a',), ('a', 'D'),
                ('a', 'bcd'),
                ('a', 'bcd', 'EF'),
                ('a', 'bcd', 'efg'),
                ('a', 'bcd', 'efg', 'ha'),
                ('aaa',), ('aaa', 'zzzF'),
                ('aab',), ('aab', 'F')
            ] if not can_symlink() else [
                ('EF',), ('ZZZ',),
                ('a',), ('a', 'D'),
                ('a', 'bcd'),
                ('a', 'bcd', 'EF'),
                ('a', 'bcd', 'efg'),
                ('a', 'bcd', 'efg', 'ha'),
                ('aaa',), ('aaa', 'zzzF'),
                ('aab',), ('aab', 'F'),
                ('sym1',), ('sym2',),
                ('sym3',),
                ('sym3', 'EF'),
                ('sym3', 'efg'),
                ('sym3', 'efg', 'ha')
            ]
        ],
        [
            ('**', '*'),
            [
                ('EF',), ('ZZZ',),
                ('a',), ('a', 'D'),
                ('a', 'bcd'),
                ('a', 'bcd', 'EF'),
                ('a', 'bcd', 'efg'),
                ('a', 'bcd', 'efg', 'ha'),
                ('aaa',), ('aaa', 'zzzF'),
                ('aab',), ('aab', 'F')
            ] if not can_symlink() else [
                ('EF',), ('ZZZ',),
                ('a',), ('a', 'D'),
                ('a', 'bcd'),
                ('a', 'bcd', 'EF'),
                ('a', 'bcd', 'efg'),
                ('a', 'bcd', 'efg', 'ha'),
                ('aaa',), ('aaa', 'zzzF'),
                ('aab',), ('aab', 'F'),
                ('sym1',), ('sym2',),
                ('sym3',),
                ('sym3', 'EF'),
                ('sym3', 'efg'),
                ('sym3', 'efg', 'ha')
            ]
        ],
        [
            (os.curdir, '**'),
            [
                ('.', ''),
                ('.', 'EF'), ('.', 'ZZZ'),
                ('.', 'a',), ('.', 'a', 'D'),
                ('.', 'a', 'bcd'),
                ('.', 'a', 'bcd', 'EF'),
                ('.', 'a', 'bcd', 'efg'),
                ('.', 'a', 'bcd', 'efg', 'ha'),
                ('.', 'aaa'), ('.', 'aaa', 'zzzF'),
                ('.', 'aab'), ('.', 'aab', 'F')
            ] if not can_symlink() else [
                ('.', ''),
                ('.', 'EF',), ('.', 'ZZZ'),
                ('.', 'a',), ('.', 'a', 'D'),
                ('.', 'a', 'bcd'),
                ('.', 'a', 'bcd', 'EF'),
                ('.', 'a', 'bcd', 'efg'),
                ('.', 'a', 'bcd', 'efg', 'ha'),
                ('.', 'aaa'), ('.', 'aaa', 'zzzF'),
                ('.', 'aab'), ('.', 'aab', 'F'),
                ('.', 'sym1'), ('.', 'sym2'),
                ('.', 'sym3'),
                ('.', 'sym3', 'EF'),
                ('.', 'sym3', 'efg'),
                ('.', 'sym3', 'efg', 'ha')
            ]
        ],
        [
            (os.curdir, '**', '*'),
            [
                ('.', 'EF'), ('.', 'ZZZ'),
                ('.', 'a',), ('.', 'a', 'D'),
                ('.', 'a', 'bcd'),
                ('.', 'a', 'bcd', 'EF'),
                ('.', 'a', 'bcd', 'efg'),
                ('.', 'a', 'bcd', 'efg', 'ha'),
                ('.', 'aaa'), ('.', 'aaa', 'zzzF'),
                ('.', 'aab'), ('.', 'aab', 'F')
            ] if not can_symlink() else [
                ('.', 'EF',), ('.', 'ZZZ'),
                ('.', 'a',), ('.', 'a', 'D'),
                ('.', 'a', 'bcd'),
                ('.', 'a', 'bcd', 'EF'),
                ('.', 'a', 'bcd', 'efg'),
                ('.', 'a', 'bcd', 'efg', 'ha'),
                ('.', 'aaa'), ('.', 'aaa', 'zzzF'),
                ('.', 'aab'), ('.', 'aab', 'F'),
                ('.', 'sym1'), ('.', 'sym2'),
                ('.', 'sym3'),
                ('.', 'sym3', 'EF'),
                ('.', 'sym3', 'efg'),
                ('.', 'sym3', 'efg', 'ha')
            ]
        ],
        [
            ('**', ''),
            [
                ('a', ''), ('a', 'bcd', ''), ('a', 'bcd', 'efg', ''),
                ('aaa', ''), ('aab', '')
            ] if not can_symlink() else [
                ('a', ''), ('a', 'bcd', ''), ('a', 'bcd', 'efg', ''),
                ('aaa', ''), ('aab', ''),
                ('sym3', ''), ('sym3', 'efg', '')
            ]
        ],
        [
            (os.curdir, '**', ''),
            [
                ('.', ''),
                ('.', 'a', ''), ('.', 'a', 'bcd', ''), ('.', 'a', 'bcd', 'efg', ''),
                ('.', 'aaa', ''), ('.', 'aab', '')
            ] if not can_symlink() else [
                ('.', ''),
                ('.', 'a', ''), ('.', 'a', 'bcd', ''), ('.', 'a', 'bcd', 'efg', ''),
                ('.', 'aaa', ''), ('.', 'aab', ''),
                ('.', 'sym3', ''), ('.', 'sym3', 'efg', '')
            ]
        ],
        [('**', 'zz*F'), [('aaa', 'zzzF')]],
        [('**zz*F',), []],
        [
            ('**', 'EF'),
            [('a', 'bcd', 'EF'), ('EF',)] if not can_symlink() else [('a', 'bcd', 'EF'), ('EF',), ('sym3', 'EF')]
        ],
        Options(just_negative=True),
        [
            ('a*',),
            [
                ('EF',), ('ZZZ',)
            ] if not can_symlink() else [
                ('EF',), ('ZZZ',),
                ('sym1',), ('sym3',), ('sym2',), ('sym3', 'efg'), ('sym3', 'efg', 'ha'), ('sym3', 'EF')
            ],
            glob.N
        ],
        Options(just_negative=False, cwd_temp=False, absolute=False),

        # Test the file directly -- without magic.
        [[], [[]]]
    ]

    @classmethod
    def setup_fs(cls):
        """Setup filesystem."""
        cls.mktemp('a', 'D')
        cls.mktemp('aab', 'F')
        cls.mktemp('.aa', 'G')
        cls.mktemp('.bb', 'H')
        cls.mktemp('aaa', 'zzzF')
        cls.mktemp('ZZZ')
        cls.mktemp('EF')
        cls.mktemp('a', 'bcd', 'EF')
        cls.mktemp('a', 'bcd', 'efg', 'ha')
        cls.can_symlink = can_symlink()
        if cls.can_symlink:
            # sym1/sym2 are intentionally broken links; sym3 targets a real dir.
            os.symlink(cls.norm('broken'), cls.norm('sym1'))
            os.symlink('broken', cls.norm('sym2'))
            os.symlink(os.path.join('a', 'bcd'), cls.norm('sym3'))

    @pytest.mark.parametrize("case", cases)
    def test_glob_cases(self, case):
        """Test glob cases."""
        self.eval_glob_cases(case)
class TestGlobCornerCase(_TestGlob):
    """
    Some tests that need a very specific file set to test against for corner cases.

    See `_TestGlob` class for more information in regards to test case format.
    """

    # Fixtures deliberately contain glob metacharacters in literal file names
    # ('test[', 'a[', '@(a', ...) to exercise escaping corner cases.
    cases = [
        # Test very specific, special cases.
        [('a[/]b',), [('a[', ']b',)]],
        [('@(a/b)',), []],
        [('@(a[/]b)',), []],
        [('test[',), [('test[',)]],
        [(r'a\/b',), [('a', 'b')] if not util.is_case_sensitive() else []],
        [(r'a[\/]b',), [('a[', ']b')] if not util.is_case_sensitive() else []],

        Options(skip=util.is_case_sensitive()),
        [('a[\\',), [('a[', '')]],
        [('@(a[\\',), [('@(a[', '')]],
        Options(skip=False)
    ]

    @classmethod
    def setup_fs(cls):
        """Setup filesystem."""
        cls.mktemp('test[')
        cls.mktemp('a', 'b')
        cls.mktemp('a[', ']b')
        cls.mktemp('@(a', 'b)')
        cls.mktemp('@(a[', ']b)')
        cls.can_symlink = can_symlink()

    @pytest.mark.parametrize("case", cases)
    def test_glob_cases(self, case):
        """Test glob cases."""
        self.eval_glob_cases(case)
class TestGlobEscapes(unittest.TestCase):
    """Validate `glob.escape` and `glob.raw_escape` for str and bytes inputs."""

    def check_escape(self, arg, expected, raw=False):
        """Assert escaping *arg* yields *expected*, for both str and bytes."""
        escape = glob.raw_escape if raw else glob.escape
        self.assertEqual(escape(arg), expected)
        self.assertEqual(escape(os.fsencode(arg)), os.fsencode(expected))

    def test_escape(self):
        """Test path escapes."""
        for pattern, escaped in (
            ('abc', 'abc'),
            ('[', r'\['),
            ('?', r'\?'),
            ('*', r'\*'),
            ('[[_/*?*/_]]', r'\[\[_/\*\?\*/_]]'),
            ('/[[_/*?*/_]]/', r'/\[\[_/\*\?\*/_]]/'),
        ):
            self.check_escape(pattern, escaped)

    def test_raw_escape(self):
        """Test raw path escapes (also resolves raw character escapes)."""
        for pattern, escaped in (
            ('abc', 'abc'),
            ('[', r'\['),
            ('?', r'\?'),
            ('*', r'\*'),
            ('[[_/*?*/_]]', r'\[\[_/\*\?\*/_]]'),
            ('/[[_/*?*/_]]/', r'/\[\[_/\*\?\*/_]]/'),
            (r'\x3f', r'\?'),
        ):
            self.check_escape(pattern, escaped, raw=True)

    @unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
    def test_escape_windows(self):
        """Test windows escapes."""
        for pattern, escaped in (
            ('a:\\?', r'a:\\\?'),
            ('b:\\*', r'b:\\\*'),
            (r'\\\\?\\c:\\?', r'\\\\?\\c:\\\?'),
            (r'\\\\*\\*\\*', r'\\\\*\\*\\\*'),
            ('//?/c:/?', r'//?/c:/\?'),
            ('//*/*/*', r'//*/*/\*'),
        ):
            self.check_escape(pattern, escaped)
@skip_unless_symlink
class SymlinkLoopGlobTests(unittest.TestCase):
    """Symlink loop test case: 'dir/link' points back at '.', creating a cycle."""

    DEFAULT_FLAGS = glob.BRACE | glob.EXTGLOB | glob.GLOBSTAR

    def globjoin(self, *parts):
        """Joins glob path."""
        # Mirror the input type: bytes parts need a bytes separator.
        sep = os.fsencode(self.globsep) if isinstance(parts[0], bytes) else self.globsep
        return sep.join(list(parts))

    def setUp(self):
        """Setup."""
        if os.sep == '/':
            self.globsep = os.sep
        else:
            self.globsep = r'\\'

    def test_selflink(self):
        """Test self links."""
        tempdir = TESTFN + "_dir"
        os.makedirs(tempdir)
        self.addCleanup(shutil.rmtree, tempdir)
        with change_cwd(tempdir):
            os.makedirs('dir')
            create_empty_file(os.path.join('dir', 'file'))
            # 'dir/link' -> '.' makes dir/link/link/... resolvable forever;
            # glob must still terminate with a finite, duplicate-free result.
            os.symlink(os.curdir, os.path.join('dir', 'link'))

            results = glob.glob('**', flags=self.DEFAULT_FLAGS)
            self.assertEqual(len(results), len(set(results)))
            results = set(results)
            # Drain the result set: each recursion depth contributes exactly
            # one 'dir/link/.../' path and one matching 'file' entry.
            depth = 0
            while results:
                path = os.path.join(*(['dir'] + ['link'] * depth))
                self.assertIn(path, results)
                results.remove(path)
                if not results:
                    break
                path = os.path.join(path, 'file')
                self.assertIn(path, results)
                results.remove(path)
                depth += 1

            results = glob.glob(os.path.join('**', 'file'), flags=self.DEFAULT_FLAGS)
            self.assertEqual(len(results), len(set(results)))
            results = set(results)
            depth = 0
            while results:
                path = os.path.join(*(['dir'] + ['link'] * depth + ['file']))
                self.assertIn(path, results)
                results.remove(path)
                depth += 1

            results = glob.glob(self.globjoin('**', ''), flags=self.DEFAULT_FLAGS)
            self.assertEqual(len(results), len(set(results)))
            results = set(results)
            depth = 0
            while results:
                path = os.path.join(*(['dir'] + ['link'] * depth + ['']))
                self.assertIn(path, results)
                results.remove(path)
                depth += 1
acde8d73a50be94a4a7e661a4164fc4545641b8e | 6,141 | py | Python | homeassistant/helpers/discovery.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 6 | 2017-08-02T19:26:39.000Z | 2020-03-14T22:47:41.000Z | homeassistant/helpers/discovery.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 54 | 2020-11-17T07:04:57.000Z | 2022-03-31T06:45:39.000Z | homeassistant/helpers/discovery.py | miccico/core | 14c205384171dee59c1a908f8449f9864778b2dc | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Helper methods to help with platform discovery.
There are two different types of discoveries that can be fired/listened for.
- listen/discover is for services. These are targeted at a component.
- listen_platform/discover_platform is for platforms. These are used by
components to allow discovery of their platforms.
"""
from typing import Any, Callable, Collection, Dict, Optional, Union
from homeassistant import core, setup
from homeassistant.const import ATTR_DISCOVERED, ATTR_SERVICE, EVENT_PLATFORM_DISCOVERED
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.loader import bind_hass
from homeassistant.util.async_ import run_callback_threadsafe
# Service name for per-component platform-load events; ``format``-ed with the
# component name (used by async_listen_platform and async_load_platform below).
EVENT_LOAD_PLATFORM = "load_platform.{}"
# Event-data key carrying the platform name in load_platform events.
ATTR_PLATFORM = "platform"
@bind_hass
def listen(
    hass: core.HomeAssistant, service: Union[str, Collection[str]], callback: Callable
) -> None:
    """Set up a discovery listener from outside the event loop.

    Thread-safe wrapper around ``async_listen``; blocks until the listener
    registration has run on the loop. Service can be a string or a list/tuple.
    """
    future = run_callback_threadsafe(hass.loop, async_listen, hass, service, callback)
    # Wait for the loop-side registration to complete before returning.
    future.result()
@core.callback
@bind_hass
def async_listen(
    hass: core.HomeAssistant, service: Union[str, Collection[str]], callback: Callable
) -> None:
    """Set up a listener for discovery of specific service(s).

    Service can be a string or a list/tuple. Must be called in the event loop.
    """
    # Normalize to a tuple so membership checks work uniformly.
    services = (service,) if isinstance(service, str) else tuple(service)
    job = core.HassJob(callback)

    async def _discovery_listener(event: core.Event) -> None:
        """Dispatch matching discovery events to the callback."""
        matched = event.data.get(ATTR_SERVICE)
        if matched is None or matched not in services:
            return
        task = hass.async_run_hass_job(job, matched, event.data.get(ATTR_DISCOVERED))
        if task:
            await task

    hass.bus.async_listen(EVENT_PLATFORM_DISCOVERED, _discovery_listener)
@bind_hass
def discover(
    hass: core.HomeAssistant,
    service: str,
    discovered: DiscoveryInfoType,
    component: str,
    hass_config: ConfigType,
) -> None:
    """Fire a discovery event from outside the event loop.

    Delegates to ``async_discover``, which also ensures *component* is loaded.
    """
    coro = async_discover(hass, service, discovered, component, hass_config)
    hass.add_job(coro)  # type: ignore
@bind_hass
async def async_discover(
    hass: core.HomeAssistant,
    service: str,
    discovered: Optional[DiscoveryInfoType],
    component: Optional[str],
    hass_config: ConfigType,
) -> None:
    """Fire a discovery event, first loading *component* if requested."""
    # Set the component up before firing so a listener can exist for it.
    if component is not None and component not in hass.config.components:
        await setup.async_setup_component(hass, component, hass_config)

    event_data: Dict[str, Any] = {ATTR_SERVICE: service}
    if discovered is not None:
        event_data[ATTR_DISCOVERED] = discovered
    hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, event_data)
@bind_hass
def listen_platform(
    hass: core.HomeAssistant, component: str, callback: Callable
) -> None:
    """Register a platform loader listener from outside the event loop.

    Thread-safe wrapper around ``async_listen_platform``.
    """
    future = run_callback_threadsafe(
        hass.loop, async_listen_platform, hass, component, callback
    )
    # Block until the loop has performed the registration.
    future.result()
@bind_hass
def async_listen_platform(
    hass: core.HomeAssistant,
    component: str,
    callback: Callable[[str, Optional[Dict[str, Any]]], Any],
) -> None:
    """Register a platform loader listener for *component*.

    Must be run in the event loop.
    """
    wanted_service = EVENT_LOAD_PLATFORM.format(component)
    job = core.HassJob(callback)

    async def _on_platform_discovered(event: core.Event) -> None:
        """Invoke the callback for matching load_platform events."""
        data = event.data
        if data.get(ATTR_SERVICE) != wanted_service:
            return
        platform = data.get(ATTR_PLATFORM)
        if not platform:
            return
        task = hass.async_run_hass_job(job, platform, data.get(ATTR_DISCOVERED))
        if task:
            await task

    hass.bus.async_listen(EVENT_PLATFORM_DISCOVERED, _on_platform_discovered)
@bind_hass
def load_platform(
    hass: core.HomeAssistant,
    component: str,
    platform: str,
    discovered: DiscoveryInfoType,
    hass_config: ConfigType,
) -> None:
    """Load a component and platform dynamically, from outside the loop.

    Fires an EVENT_PLATFORM_DISCOVERED carrying the load_platform service
    name, the platform, and the discovery info; use ``listen_platform`` to
    register a callback for these events.
    """
    coro = async_load_platform(hass, component, platform, discovered, hass_config)
    hass.add_job(coro)  # type: ignore
@bind_hass
async def async_load_platform(
    hass: core.HomeAssistant,
    component: str,
    platform: str,
    discovered: DiscoveryInfoType,
    hass_config: ConfigType,
) -> None:
    """Load a component and platform dynamically.

    Sets *component* up if needed, then fires EVENT_PLATFORM_DISCOVERED with
    the load_platform service name for the component, the platform, and the
    discovery payload. Use ``listen_platform`` to register a callback for
    these events.

    Warning: Do not await this inside a setup method to avoid a dead lock.
    Use ``hass.async_create_task(async_load_platform(..))`` instead.

    This method is a coroutine.
    """
    assert hass_config, "You need to pass in the real hass config"

    if component not in hass.config.components:
        if not await setup.async_setup_component(hass, component, hass_config):
            # Component setup failed; no listener will exist, so do not fire.
            return

    event_data: Dict[str, Any] = {
        ATTR_SERVICE: EVENT_LOAD_PLATFORM.format(component),
        ATTR_PLATFORM: platform,
    }
    if discovered is not None:
        event_data[ATTR_DISCOVERED] = discovered
    hass.bus.async_fire(EVENT_PLATFORM_DISCOVERED, event_data)
| 30.251232 | 88 | 0.695326 |
acde8da1389672c14b935527b0fd51d1814758b1 | 11,333 | py | Python | mypy/test/typefixture.py | TV4Fun/mypy | fe6fd1a10cd133fd6833ff64a68a27d0641820dc | [
"PSF-2.0"
] | null | null | null | mypy/test/typefixture.py | TV4Fun/mypy | fe6fd1a10cd133fd6833ff64a68a27d0641820dc | [
"PSF-2.0"
] | null | null | null | mypy/test/typefixture.py | TV4Fun/mypy | fe6fd1a10cd133fd6833ff64a68a27d0641820dc | [
"PSF-2.0"
] | null | null | null | """Fixture used in type-related test cases.
It contains class TypeInfos and Type objects.
"""
from typing import List, Optional
from mypy.types import (
Type, TypeVarType, AnyType, NoneTyp, Instance, CallableType, TypeVarDef, TypeType,
UninhabitedType, TypeOfAny
)
from mypy.nodes import (
TypeInfo, ClassDef, Block, ARG_POS, ARG_OPT, ARG_STAR, SymbolTable,
COVARIANT)
class TypeFixture:
    """Helper class that is used as a fixture in type-related unit tests.
    The members are initialized to contain various type-related values.
    """
    def __init__(self, variance: int=COVARIANT) -> None:
        """Build the fixture; *variance* is applied to most generic classes."""
        # The 'object' class
        self.oi = self.make_type_info('builtins.object')  # class object
        self.o = Instance(self.oi, [])  # object
        # Type variables (these are effectively global)
        # Local helper: wrap a TypeVarDef in a TypeVarType in one step.
        def make_type_var(name: str, id: int, values: List[Type], upper_bound: Type,
                          variance: int) -> TypeVarType:
            return TypeVarType(TypeVarDef(name, name, id, values, upper_bound, variance))
        self.t = make_type_var('T', 1, [], self.o, variance)     # T`1 (type variable)
        self.tf = make_type_var('T', -1, [], self.o, variance)   # T`-1 (type variable)
        self.tf2 = make_type_var('T', -2, [], self.o, variance)  # T`-2 (type variable)
        self.s = make_type_var('S', 2, [], self.o, variance)     # S`2 (type variable)
        self.s1 = make_type_var('S', 1, [], self.o, variance)    # S`1 (type variable)
        self.sf = make_type_var('S', -2, [], self.o, variance)   # S`-2 (type variable)
        self.sf1 = make_type_var('S', -1, [], self.o, variance)  # S`-1 (type variable)
        # Simple types
        self.anyt = AnyType(TypeOfAny.special_form)
        self.nonet = NoneTyp()
        self.uninhabited = UninhabitedType()
        # Abstract class TypeInfos
        # class F
        self.fi = self.make_type_info('F', is_abstract=True)
        # class F2
        self.f2i = self.make_type_info('F2', is_abstract=True)
        # class F3(F)
        self.f3i = self.make_type_info('F3', is_abstract=True, mro=[self.fi])
        # Class TypeInfos
        self.std_tuplei = self.make_type_info('builtins.tuple',
                                              mro=[self.oi],
                                              typevars=['T'],
                                              variances=[COVARIANT])  # class tuple
        self.type_typei = self.make_type_info('builtins.type')  # class type
        self.functioni = self.make_type_info('builtins.function')  # function TODO
        self.ai = self.make_type_info('A', mro=[self.oi])  # class A
        self.bi = self.make_type_info('B', mro=[self.ai, self.oi])  # class B(A)
        self.ci = self.make_type_info('C', mro=[self.ai, self.oi])  # class C(A)
        self.di = self.make_type_info('D', mro=[self.oi])  # class D
        # class E(F)
        self.ei = self.make_type_info('E', mro=[self.fi, self.oi])
        # class E2(F2, F)
        self.e2i = self.make_type_info('E2', mro=[self.f2i, self.fi, self.oi])
        # class E3(F, F2)
        self.e3i = self.make_type_info('E3', mro=[self.fi, self.f2i, self.oi])
        # Generic class TypeInfos
        # G[T]
        self.gi = self.make_type_info('G', mro=[self.oi],
                                      typevars=['T'],
                                      variances=[variance])
        # G2[T]
        self.g2i = self.make_type_info('G2', mro=[self.oi],
                                       typevars=['T'],
                                       variances=[variance])
        # H[S, T]
        self.hi = self.make_type_info('H', mro=[self.oi],
                                      typevars=['S', 'T'],
                                      variances=[variance, variance])
        # GS[T, S] <: G[S]
        self.gsi = self.make_type_info('GS', mro=[self.gi, self.oi],
                                       typevars=['T', 'S'],
                                       variances=[variance, variance],
                                       bases=[Instance(self.gi, [self.s])])
        # GS2[S] <: G[S]
        self.gs2i = self.make_type_info('GS2', mro=[self.gi, self.oi],
                                        typevars=['S'],
                                        variances=[variance],
                                        bases=[Instance(self.gi, [self.s1])])
        # list[T]
        self.std_listi = self.make_type_info('builtins.list', mro=[self.oi],
                                             typevars=['T'],
                                             variances=[variance])
        # Instance types
        self.std_tuple = Instance(self.std_tuplei, [self.anyt])  # tuple
        self.type_type = Instance(self.type_typei, [])  # type
        self.function = Instance(self.functioni, [])  # function TODO
        self.a = Instance(self.ai, [])  # A
        self.b = Instance(self.bi, [])  # B
        self.c = Instance(self.ci, [])  # C
        self.d = Instance(self.di, [])  # D
        self.e = Instance(self.ei, [])  # E
        self.e2 = Instance(self.e2i, [])  # E2
        self.e3 = Instance(self.e3i, [])  # E3
        self.f = Instance(self.fi, [])  # F
        self.f2 = Instance(self.f2i, [])  # F2
        self.f3 = Instance(self.f3i, [])  # F3
        # Generic instance types
        self.ga = Instance(self.gi, [self.a])  # G[A]
        self.gb = Instance(self.gi, [self.b])  # G[B]
        self.gd = Instance(self.gi, [self.d])  # G[D]
        self.go = Instance(self.gi, [self.o])  # G[object]
        self.gt = Instance(self.gi, [self.t])  # G[T`1]
        self.gtf = Instance(self.gi, [self.tf])  # G[T`-1]
        self.gtf2 = Instance(self.gi, [self.tf2])  # G[T`-2]
        self.gs = Instance(self.gi, [self.s])  # G[S]
        self.gdyn = Instance(self.gi, [self.anyt])  # G[Any]
        self.g2a = Instance(self.g2i, [self.a])  # G2[A]
        self.gsaa = Instance(self.gsi, [self.a, self.a])  # GS[A, A]
        self.gsab = Instance(self.gsi, [self.a, self.b])  # GS[A, B]
        self.gsba = Instance(self.gsi, [self.b, self.a])  # GS[B, A]
        self.gs2a = Instance(self.gs2i, [self.a])  # GS2[A]
        self.gs2b = Instance(self.gs2i, [self.b])  # GS2[B]
        self.gs2d = Instance(self.gs2i, [self.d])  # GS2[D]
        self.hab = Instance(self.hi, [self.a, self.b])  # H[A, B]
        self.haa = Instance(self.hi, [self.a, self.a])  # H[A, A]
        self.hbb = Instance(self.hi, [self.b, self.b])  # H[B, B]
        self.hts = Instance(self.hi, [self.t, self.s])  # H[T, S]
        self.had = Instance(self.hi, [self.a, self.d])  # H[A, D]
        self.lsta = Instance(self.std_listi, [self.a])  # List[A]
        self.lstb = Instance(self.std_listi, [self.b])  # List[B]
        self.type_a = TypeType.make_normalized(self.a)
        self.type_b = TypeType.make_normalized(self.b)
        self.type_c = TypeType.make_normalized(self.c)
        self.type_d = TypeType.make_normalized(self.d)
        self.type_t = TypeType.make_normalized(self.t)
        self.type_any = TypeType.make_normalized(self.anyt)
    # Helper methods
    def callable(self, *a: Type) -> CallableType:
        """callable(a1, ..., an, r) constructs a callable with argument types
        a1, ... an and return type r.
        """
        return CallableType(list(a[:-1]), [ARG_POS] * (len(a) - 1),
                            [None] * (len(a) - 1), a[-1], self.function)
    def callable_type(self, *a: Type) -> CallableType:
        """callable_type(a1, ..., an, r) constructs a callable with
        argument types a1, ... an and return type r, and which
        represents a type.
        """
        return CallableType(list(a[:-1]), [ARG_POS] * (len(a) - 1),
                            [None] * (len(a) - 1), a[-1], self.type_type)
    def callable_default(self, min_args: int, *a: Type) -> CallableType:
        """callable_default(min_args, a1, ..., an, r) constructs a
        callable with argument types a1, ... an and return type r,
        with min_args mandatory fixed arguments.
        """
        n = len(a) - 1
        # Arguments beyond min_args become optional (ARG_OPT).
        return CallableType(list(a[:-1]),
                            [ARG_POS] * min_args + [ARG_OPT] * (n - min_args),
                            [None] * n,
                            a[-1], self.function)
    def callable_var_arg(self, min_args: int, *a: Type) -> CallableType:
        """callable_var_arg(min_args, a1, ..., an, r) constructs a callable
        with argument types a1, ... *an and return type r.
        """
        n = len(a) - 1
        # Last argument becomes *args (ARG_STAR); middle ones are optional.
        return CallableType(list(a[:-1]),
                            [ARG_POS] * min_args +
                            [ARG_OPT] * (n - 1 - min_args) +
                            [ARG_STAR], [None] * n,
                            a[-1], self.function)
    def make_type_info(self, name: str,
                       module_name: Optional[str] = None,
                       is_abstract: bool = False,
                       mro: Optional[List[TypeInfo]] = None,
                       bases: Optional[List[Instance]] = None,
                       typevars: Optional[List[str]] = None,
                       variances: Optional[List[int]] = None) -> TypeInfo:
        """Make a TypeInfo suitable for use in unit tests."""
        class_def = ClassDef(name, Block([]), None, [])
        class_def.fullname = name
        # Derive the module from a dotted name, defaulting to __main__.
        if module_name is None:
            if '.' in name:
                module_name = name.rsplit('.', 1)[0]
            else:
                module_name = '__main__'
        if typevars:
            # Type variable ids start at 1; variance falls back to COVARIANT.
            v = []  # type: List[TypeVarDef]
            for id, n in enumerate(typevars, 1):
                if variances:
                    variance = variances[id - 1]
                else:
                    variance = COVARIANT
                v.append(TypeVarDef(n, n, id, [], self.o, variance=variance))
            class_def.type_vars = v
        info = TypeInfo(SymbolTable(), class_def, module_name)
        if mro is None:
            mro = []
            if name != 'builtins.object':
                mro.append(self.oi)
        # MRO always begins with the class itself.
        info.mro = [info] + mro
        if bases is None:
            if mro:
                # By default, assume that there is a single non-generic base.
                bases = [Instance(mro[0], [])]
            else:
                bases = []
        info.bases = bases
        return info
class InterfaceTypeFixture(TypeFixture):
    """Extension of TypeFixture that contains additional generic
    interface types."""
    def __init__(self) -> None:
        """Add the abstract generic GF[T] and its implementor M1."""
        super().__init__()
        # GF[T]
        self.gfi = self.make_type_info('GF', typevars=['T'], is_abstract=True)
        # M1 <: GF[A]
        self.m1i = self.make_type_info('M1',
                                       is_abstract=True,
                                       mro=[self.gfi, self.oi],
                                       bases=[Instance(self.gfi, [self.a])])
        self.gfa = Instance(self.gfi, [self.a])  # GF[A]
        self.gfb = Instance(self.gfi, [self.b])  # GF[B]
        self.m1 = Instance(self.m1i, [])  # M1
| 43.421456 | 89 | 0.502427 |
acde8dabe60c4030b027c3ff1a09523e3cfef6cd | 924 | py | Python | adjunct/time.py | kgaughan/adjunct | b4e845d3b9c0648e6ac46ee25e484e9660f3924e | [
"MIT"
] | null | null | null | adjunct/time.py | kgaughan/adjunct | b4e845d3b9c0648e6ac46ee25e484e9660f3924e | [
"MIT"
] | 1 | 2019-02-23T14:41:35.000Z | 2020-06-15T13:00:46.000Z | adjunct/time.py | kgaughan/adjunct | b4e845d3b9c0648e6ac46ee25e484e9660f3924e | [
"MIT"
] | null | null | null | """
Date/time utilities.
"""
import datetime
def date(dt, fmt="%Y-%m-%dT%H:%M:%S%z", tz=datetime.timezone.utc):
    """
    Format an SQLite timestamp string.

    The timestamp is parsed via ``parse_dt``, which attaches *tz* (default
    UTC) as the timezone; pass a different ``tzinfo`` object to label the
    timestamp with another zone. ``fmt`` is a ``strftime`` format string and
    defaults to the `ISO 8601`_/`RFC 3339`_ date format.

    .. _ISO 8601: http://en.wikipedia.org/wiki/ISO_8601
    .. _RFC 3339: http://tools.ietf.org/html/rfc3339
    """
    parsed = parse_dt(dt, tz)
    return parsed.strftime(fmt)
def parse_dt(dt, tz=datetime.timezone.utc):
    """
    Parse an SQLite datetime, treating it as UTC.

    Accepts ``YYYY-MM-DD HH:MM:SS`` with optional fractional seconds
    (``.ffffff``), the form written by Python's sqlite3 timestamp adapter.

    If you want it treated naively, pass `None` as the timezone.
    """
    try:
        parsed = datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        # Retry with microseconds, which some writers append.
        parsed = datetime.datetime.strptime(dt, "%Y-%m-%d %H:%M:%S.%f")
    return parsed if tz is None else parsed.replace(tzinfo=tz)
acde8efe79616d08343fbe4c068fe0c99fbd537b | 9,719 | py | Python | train.py | JiangChongCong/ENet-SAD_Pytorch | a3cc4de9bf628cfefeafdd56d776ae38ad6d000a | [
"MIT"
] | 1 | 2020-09-14T12:44:35.000Z | 2020-09-14T12:44:35.000Z | train.py | noticeable/ENet-SAD_Pytorch | f4e07c6298cafffbfd33fbd006ebffeec99e7432 | [
"MIT"
] | null | null | null | train.py | noticeable/ENet-SAD_Pytorch | f4e07c6298cafffbfd33fbd006ebffeec99e7432 | [
"MIT"
] | null | null | null | import argparse
import json
import os
import shutil
import time
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from config import *
import dataset
from model import SCNN
from model_ENET_SAD import ENet_SAD
from utils.tensorboard import TensorBoard
from utils.transforms import *
from utils.lr_scheduler import PolyLR
from multiprocessing import Process, JoinableQueue
from threading import Lock
import pickle
#from torch.multiprocessing import Process, SimpleQueue
def parse_args():
    """Parse command-line options: experiment directory and resume flag."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--exp_dir", type=str, default="./experiments/exp0")
    cli.add_argument("--resume", "-r", action="store_true")
    return cli.parse_args()
args = parse_args()
# ------------ config ------------
# Normalize the experiment dir (strip trailing slashes) and derive its name,
# which is used as the checkpoint/tensorboard tag prefix below.
exp_dir = args.exp_dir
while exp_dir[-1]=='/':
    exp_dir = exp_dir[:-1]
exp_name = exp_dir.split('/')[-1]
# Experiment configuration is read from <exp_dir>/cfg.json.
with open(os.path.join(exp_dir, "cfg.json")) as f:
    exp_cfg = json.load(f)
resize_shape = tuple(exp_cfg['dataset']['resize_shape'])
device = torch.device(exp_cfg['device'])
tensorboard = TensorBoard(exp_dir)
# ------------ train data ------------
# # CULane mean, std
mean=(0.3598, 0.3653, 0.3662)
std=(0.2573, 0.2663, 0.2756)
# Imagenet mean, std (alternative normalization, currently disabled)
# mean=(0.485, 0.456, 0.406)
# std=(0.229, 0.224, 0.225)
transform_train = Compose(Resize(resize_shape), Rotation(2), ToTensor(),
                          Normalize(mean=mean, std=std))
dataset_name = exp_cfg['dataset'].pop('dataset_name')
Dataset_Type = getattr(dataset, dataset_name)
train_dataset = Dataset_Type(Dataset_Path[dataset_name], "train", transform_train)
train_loader = DataLoader(train_dataset, batch_size=exp_cfg['dataset']['batch_size'], shuffle=True, collate_fn=train_dataset.collate, num_workers=6)
# ------------ val data ------------
# transform_val_img is kept separate so val() can re-apply the resize to raw
# images when rendering visualizations.
transform_val_img = Resize(resize_shape)
transform_val_x = Compose(ToTensor(), Normalize(mean=mean, std=std))
transform_val = Compose(transform_val_img, transform_val_x)
val_dataset = Dataset_Type(Dataset_Path[dataset_name], "val", transform_val)
val_loader = DataLoader(val_dataset, batch_size=8, collate_fn=val_dataset.collate, num_workers=4)
# ------------ preparation ------------
# Model choice is driven by cfg.json's 'model' key.
if exp_cfg['model'] == "scnn":
    net = SCNN(resize_shape, pretrained=True)
elif exp_cfg['model'] == "enet_sad":
    net = ENet_SAD(resize_shape, sad=True)
else:
    raise Exception("Model not match. 'model' in 'cfg.json' should be 'scnn' or 'enet_sad'.")
net = net.to(device)
net = torch.nn.DataParallel(net)
optimizer = optim.SGD(net.parameters(), **exp_cfg['optim'])
lr_scheduler = PolyLR(optimizer, 0.9, **exp_cfg['lr_scheduler'])
best_val_loss = 1e6
# Disabled experimental batch-prefetch worker (kept for reference; the
# matching call sites in train() are also commented out).
"""
def batch_processor(arg):
    b_queue, data_loader = arg
    while True:
        if b_queue.empty():
            sample = next(data_loader)
            b_queue.put(sample)
        b_queue.join()
"""
def train(epoch):
    """Run one training epoch, log per-batch losses, and save a checkpoint."""
    print("Train Epoch: {}".format(epoch))
    net.train()
    # NOTE(review): despite the zero-initialization these hold the *last*
    # batch's loss, not a running sum — they are logged per iteration below.
    train_loss = 0
    train_loss_seg = 0
    train_loss_exist = 0
    progressbar = tqdm(range(len(train_loader)))
    """
    batch_queue = JoinableQueue()
    data_loader = iter(train_loader)
    p = Process(target=batch_processor, args=((batch_queue, data_loader),))
    p.start()
    """
    for batch_idx, sample in enumerate(train_loader):
        """
        for batch_idx in range(len(train_loader)):
        sample = batch_queue.get()
        batch_queue.task_done()
        """
        img = sample['img'].to(device)
        segLabel = sample['segLabel'].to(device)
        exist = sample['exist'].to(device)
        optimizer.zero_grad()
        if exp_cfg['model'] == "scnn":
            seg_pred, exist_pred, loss_seg, loss_exist, loss = net(img, segLabel, exist)
        elif exp_cfg['model'] == "enet_sad":
            # SAD distillation is only enabled after 'sad_start_iter' global
            # iterations (the extra boolean argument toggles it).
            if (epoch * len(train_loader) + batch_idx) < exp_cfg['sad_start_iter']:
                seg_pred, exist_pred, loss_seg, loss_exist, loss = net(img, segLabel, exist, False)
            else:
                print("sad activated")
                seg_pred, exist_pred, loss_seg, loss_exist, loss = net(img, segLabel, exist, True)
        # DataParallel returns one loss per replica; reduce to a scalar.
        if isinstance(net, torch.nn.DataParallel):
            loss_seg = loss_seg.sum()
            loss_exist = loss_exist.sum()
            loss = loss.sum()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        # Global iteration index used as the tensorboard x-axis.
        iter_idx = epoch * len(train_loader) + batch_idx
        train_loss = loss.item()
        train_loss_seg = loss_seg.item()
        train_loss_exist = loss_exist.item()
        progressbar.set_description("batch loss: {:.3f}".format(loss.item()))
        progressbar.update(1)
        lr = optimizer.param_groups[0]['lr']
        tensorboard.scalar_summary(exp_name + "/train_loss", train_loss, iter_idx)
        tensorboard.scalar_summary(exp_name + "/train_loss_seg", train_loss_seg, iter_idx)
        tensorboard.scalar_summary(exp_name + "/train_loss_exist", train_loss_exist, iter_idx)
        tensorboard.scalar_summary(exp_name + "/learning_rate", lr, iter_idx)
    """
    p.join(10)
    """
    progressbar.close()
    tensorboard.writer.flush()
    # '% 1' is always 0, i.e. a checkpoint is written every epoch; the
    # modulus is a placeholder for a configurable save frequency.
    if epoch % 1 == 0:
        save_dict = {
            "epoch": epoch,
            "net": net.module.state_dict() if isinstance(net, torch.nn.DataParallel) else net.state_dict(),
            "optim": optimizer.state_dict(),
            "lr_scheduler": lr_scheduler.state_dict(),
            "best_val_loss": best_val_loss
        }
        save_name = os.path.join(exp_dir, exp_name + '.pth')
        torch.save(save_dict, save_name)
        print("model is saved: {}".format(save_name))
    print("------------------------\n")
def val(epoch):
    """Validate for one epoch, log losses/visualizations, track best model."""
    global best_val_loss
    print("Val Epoch: {}".format(epoch))
    net.eval()
    val_loss = 0
    val_loss_seg = 0
    val_loss_exist = 0
    progressbar = tqdm(range(len(val_loader)))
    with torch.no_grad():
        for batch_idx, sample in enumerate(val_loader):
            img = sample['img'].to(device)
            segLabel = sample['segLabel'].to(device)
            exist = sample['exist'].to(device)
            seg_pred, exist_pred, loss_seg, loss_exist, loss = net(img, segLabel, exist)
            if isinstance(net, torch.nn.DataParallel):
                loss_seg = loss_seg.sum()
                loss_exist = loss_exist.sum()
                loss = loss.sum()
            # visualize validation every 5 frames, 50 frames in all
            gap_num = 5
            if batch_idx%gap_num == 0 and batch_idx < 50 * gap_num:
                origin_imgs = []
                seg_pred = seg_pred.detach().cpu().numpy()
                exist_pred = exist_pred.detach().cpu().numpy()
                # NOTE(review): the loop below rebinds 'img' from the batch
                # tensor to a cv2 image; range(len(img)) is evaluated once
                # beforehand, so this works but is fragile.
                for b in range(len(img)):
                    img_name = sample['img_name'][b]
                    img = cv2.imread(img_name)
                    img = transform_val_img({'img': img})['img']
                    lane_img = np.zeros_like(img)
                    # One BGR color per lane channel (up to 4 lanes).
                    color = np.array([[255, 125, 0], [0, 255, 0], [0, 0, 255], [0, 255, 255]], dtype='uint8')
                    coord_mask = np.argmax(seg_pred[b], axis=0)
                    for i in range(0, 4):
                        # Only draw lanes the existence head is confident about.
                        if exist_pred[b, i] > 0.5:
                            lane_img[coord_mask==(i+1)] = color[i]
                    img = cv2.addWeighted(src1=lane_img, alpha=0.8, src2=img, beta=1., gamma=0.)
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    lane_img = cv2.cvtColor(lane_img, cv2.COLOR_BGR2RGB)
                    cv2.putText(lane_img, "{}".format([1 if exist_pred[b, i]>0.5 else 0 for i in range(4)]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 1.1, (255, 255, 255), 2)
                    origin_imgs.append(img)
                    origin_imgs.append(lane_img)
                tensorboard.image_summary("img_{}".format(batch_idx), origin_imgs, epoch)
            val_loss += loss.item()
            val_loss_seg += loss_seg.item()
            val_loss_exist += loss_exist.item()
            progressbar.set_description("batch loss: {:.3f}".format(loss.item()))
            progressbar.update(1)
    progressbar.close()
    iter_idx = (epoch + 1) * len(train_loader)  # keep aligned with the training-process iter_idx
    tensorboard.scalar_summary("val_loss", val_loss, iter_idx)
    tensorboard.scalar_summary("val_loss_seg", val_loss_seg, iter_idx)
    tensorboard.scalar_summary("val_loss_exist", val_loss_exist, iter_idx)
    tensorboard.writer.flush()
    print("------------------------\n")
    # Copy the latest checkpoint (written by train()) as the new best model.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        save_name = os.path.join(exp_dir, exp_name + '.pth')
        copy_name = os.path.join(exp_dir, exp_name + '_best.pth')
        shutil.copyfile(save_name, copy_name)
def main():
    """Optionally resume from a checkpoint, then run the train/val loop."""
    global best_val_loss
    if args.resume:
        # Restore network, optimizer, scheduler, epoch and best-loss state.
        save_dict = torch.load(os.path.join(exp_dir, exp_name + '.pth'))
        if isinstance(net, torch.nn.DataParallel):
            net.module.load_state_dict(save_dict['net'])
        else:
            net.load_state_dict(save_dict['net'])
        optimizer.load_state_dict(save_dict['optim'])
        lr_scheduler.load_state_dict(save_dict['lr_scheduler'])
        start_epoch = save_dict['epoch'] + 1
        # Older checkpoints may lack this key; fall back to the initial value.
        best_val_loss = save_dict.get("best_val_loss", 1e6)
    else:
        start_epoch = 0
    # Epoch count is derived from the scheduler's max iteration budget.
    exp_cfg['MAX_EPOCHES'] = int(np.ceil(exp_cfg['lr_scheduler']['max_iter'] / len(train_loader)))
    for epoch in range(start_epoch, exp_cfg['MAX_EPOCHES']):
        #val(epoch)
        train(epoch)
        # '% 1' is always 0: validation currently runs after every epoch.
        if epoch % 1 == 0:
            print("\nValidation For Experiment: ", exp_dir)
            print(time.strftime('%H:%M:%S', time.localtime()))
            val(epoch)
if __name__ == "__main__":
    main()
| 35.863469 | 169 | 0.618171 |
acde90090b299f91164b7917b62bf9a0910e9915 | 58,981 | py | Python | tests/model/test_model.py | meelanp/zulip-terminal | 49d3fa5244eab398fa2597156e7309f5bfa6c24c | [
"Apache-2.0"
] | null | null | null | tests/model/test_model.py | meelanp/zulip-terminal | 49d3fa5244eab398fa2597156e7309f5bfa6c24c | [
"Apache-2.0"
] | null | null | null | tests/model/test_model.py | meelanp/zulip-terminal | 49d3fa5244eab398fa2597156e7309f5bfa6c24c | [
"Apache-2.0"
] | null | null | null | import json
from typing import Any
import pytest
from zulip import ZulipError
from zulipterminal.helper import initial_index, powerset
from zulipterminal.model import Model, ServerConnectionFailure
class TestModel:
    @pytest.fixture(autouse=True)
    def mock_external_classes(self, mocker: Any) -> None:
        """Patch network-facing pieces so Model never touches a real server."""
        self.urlparse = mocker.patch('urllib.parse.urlparse')
        self.controller = mocker.patch('zulipterminal.core.'
                                       'Controller',
                                       return_value=None)
        self.client = mocker.patch('zulipterminal.core.'
                                   'Controller.client')
        self.client.base_url = 'chat.zulip.zulip'
        mocker.patch('zulipterminal.model.Model._start_presence_updates')
    @pytest.fixture
    def model(self, mocker, initial_data, user_profile):
        """Construct a Model with all server-dependent calls stubbed out."""
        mocker.patch('zulipterminal.model.Model.get_messages',
                     return_value='')
        self.client.register.return_value = initial_data
        mocker.patch('zulipterminal.model.Model.get_all_users',
                     return_value=[])
        mocker.patch('zulipterminal.model.Model.'
                     '_stream_info_from_subscriptions',
                     return_value=({}, set(), [], []))
        # NOTE: PATCH WHERE USED NOT WHERE DEFINED
        self.classify_unread_counts = mocker.patch(
            'zulipterminal.model.classify_unread_counts',
            return_value=[])
        self.client.get_profile.return_value = user_profile
        model = Model(self.controller)
        return model
    def test_init(self, model, initial_data, user_profile):
        """Verify the initial state and startup calls of a fresh Model."""
        assert hasattr(model, 'controller')
        assert hasattr(model, 'client')
        # Defaults before any narrow/message activity.
        assert model.msg_view is None
        assert model.msg_list is None
        assert model.narrow == []
        assert model.found_newest is False
        assert model.stream_id == -1
        assert model.stream_dict == {}
        assert model.recipients == frozenset()
        assert model.index == initial_index
        model.get_messages.assert_called_once_with(num_before=30,
                                                   num_after=10,
                                                   anchor=None)
        assert model.initial_data == initial_data
        # Profile fields are taken from the (mocked) get_profile response.
        assert model.user_id == user_profile['user_id']
        assert model.user_full_name == user_profile['full_name']
        assert model.user_email == user_profile['email']
        assert model.server_name == initial_data['realm_name']
        # FIXME Add test here for model.server_url
        model.get_all_users.assert_called_once_with()
        assert model.users == []
        (model._stream_info_from_subscriptions.
         assert_called_once_with(initial_data['subscriptions']))
        assert model.pinned_streams == []
        assert model.unpinned_streams == []
        self.classify_unread_counts.assert_called_once_with(model)
        assert model.unread_counts == []
def test_init_InvalidAPIKey_response(self, mocker, initial_data):
# Both network calls indicate the same response
mocker.patch('zulipterminal.model.Model.get_messages',
return_value='Invalid API key')
mocker.patch('zulipterminal.model.Model._register_desired_events',
return_value='Invalid API key')
mocker.patch('zulipterminal.model.Model.get_all_users',
return_value=[])
mocker.patch('zulipterminal.model.Model.'
'_stream_info_from_subscriptions',
return_value=({}, set(), [], []))
self.classify_unread_counts = mocker.patch(
'zulipterminal.model.classify_unread_counts',
return_value=[])
with pytest.raises(ServerConnectionFailure) as e:
model = Model(self.controller)
assert str(e.value) == 'Invalid API key (get_messages, register)'
def test_init_ZulipError_exception(self, mocker, initial_data,
exception_text="X"):
# Both network calls fail, resulting in exceptions
mocker.patch('zulipterminal.model.Model.get_messages',
side_effect=ZulipError(exception_text))
mocker.patch('zulipterminal.model.Model._register_desired_events',
side_effect=ZulipError(exception_text))
mocker.patch('zulipterminal.model.Model.get_all_users',
return_value=[])
mocker.patch('zulipterminal.model.Model.'
'_stream_info_from_subscriptions',
return_value=({}, set(), [], []))
self.classify_unread_counts = mocker.patch(
'zulipterminal.model.classify_unread_counts',
return_value=[])
with pytest.raises(ServerConnectionFailure) as e:
model = Model(self.controller)
assert str(e.value) == exception_text + ' (get_messages, register)'
    def test_register_initial_desired_events(self, mocker, initial_data):
        """The initial register call must request exactly these event types."""
        mocker.patch('zulipterminal.model.Model.get_messages',
                     return_value='')
        mocker.patch('zulipterminal.model.Model.get_all_users')
        mocker.patch('zulipterminal.model.Model.fetch_all_topics')
        self.client.register.return_value = initial_data
        model = Model(self.controller)
        # Events the client subscribes to for live updates.
        event_types = [
            'message',
            'update_message',
            'reaction',
            'subscription',
            'typing',
            'update_message_flags',
        ]
        # State fetched up-front as part of registration.
        fetch_event_types = [
            'realm',
            'presence',
            'subscription',
            'message',
            'update_message_flags',
            'muted_topics',
            'realm_user',
            'realm_user_groups',
        ]
        model.client.register.assert_called_once_with(
            event_types=event_types,
            fetch_event_types=fetch_event_types,
            apply_markdown=True,
            client_gravatar=True)
    # set() stands in for 'no recorded focus' alongside real message ids.
    @pytest.mark.parametrize('msg_id', [1, 5, set()])
    @pytest.mark.parametrize('narrow', [
        [],
        [['stream', 'hello world']],
        [['stream', 'hello world'], ['topic', "what's it all about?"]],
        [['pm_with', 'FOO@zulip.com']],
        [['pm_with', 'Foo@zulip.com, Bar@zulip.com']],
        [['is', 'private']],
        [['is', 'starred']],
    ])
    def test_get_focus_in_current_narrow_individually(self,
                                                      model, msg_id, narrow):
        """Focus lookup keys the pointer index by the stringified narrow."""
        model.index = {'pointer': {str(narrow): msg_id}}
        model.narrow = narrow
        assert model.get_focus_in_current_narrow() == msg_id
    @pytest.mark.parametrize('msg_id', [1, 5])
    @pytest.mark.parametrize('narrow', [
        [],
        [['stream', 'hello world']],
        [['stream', 'hello world'], ['topic', "what's it all about?"]],
        [['pm_with', 'FOO@zulip.com']],
        [['pm_with', 'Foo@zulip.com, Bar@zulip.com']],
        [['is', 'private']],
        [['is', 'starred']],
    ])
    def test_set_focus_in_current_narrow(self, mocker, model, narrow, msg_id):
        """Setting focus stores msg_id under the stringified narrow key."""
        from collections import defaultdict
        model.index = dict(pointer=defaultdict(set))
        model.narrow = narrow
        model.set_focus_in_current_narrow(msg_id)
        assert model.index['pointer'][str(narrow)] == msg_id
@pytest.mark.parametrize('narrow, is_search_narrow', [
([], False),
([['search', 'FOO']], True),
([['is', 'private']], False),
([['is', 'private'], ['search', 'FOO']], True),
([['search', 'FOO'], ['is', 'private']], True),
([['stream', 'PTEST']], False),
([['stream', 'PTEST'], ['search', 'FOO']], True),
([['stream', '7'], ['topic', 'Test']], False),
([['stream', '7'], ['topic', 'Test'], ['search', 'FOO']], True),
([['stream', '7'], ['search', 'FOO'], ['topic', 'Test']], True),
([['search', 'FOO'], ['stream', '7'], ['topic', 'Test']], True)
])
def test_is_search_narrow(self, model, narrow, is_search_narrow):
model.narrow = narrow
assert model.is_search_narrow() == is_search_narrow
@pytest.mark.parametrize('bad_args', [
dict(topic='some topic'),
dict(stream='foo', pm_with='someone'),
dict(topic='blah', pm_with='someone'),
dict(pm_with='someone', topic='foo')
])
def test_set_narrow_bad_input(self, model, bad_args):
with pytest.raises(RuntimeError):
model.set_narrow(**bad_args)
@pytest.mark.parametrize('narrow, good_args', [
([], dict()),
([['stream', 'some stream']], dict(stream='some stream')),
([['stream', 'some stream'], ['topic', 'some topic']],
dict(stream='some stream', topic='some topic')),
([['is', 'starred']], dict(starred=True)),
([['is', 'mentioned']], dict(mentioned=True)),
([['is', 'private']], dict(pms=True)),
([['pm_with', 'FOO@zulip.com']], dict(pm_with='FOO@zulip.com')),
])
def test_set_narrow_already_set(self, model, narrow, good_args):
model.narrow = narrow
assert model.set_narrow(**good_args)
assert model.narrow == narrow
    @pytest.mark.parametrize('initial_narrow, narrow, good_args', [
        ([['stream', 'foo']], [], dict()),
        ([], [['stream', 'some stream']], dict(stream='some stream')),
        ([], [['stream', 'some stream'], ['topic', 'some topic']],
         dict(stream='some stream', topic='some topic')),
        ([], [['is', 'starred']], dict(starred=True)),
        ([], [['is', 'mentioned']], dict(mentioned=True)),
        ([], [['is', 'private']], dict(pms=True)),
        ([], [['pm_with', 'FOOBOO@gmail.com']],
         dict(pm_with='FOOBOO@gmail.com')),
    ])
    def test_set_narrow_not_already_set(self, model, initial_narrow, narrow,
                                        good_args, user_dict):
        """Switching to a different narrow returns False and updates it."""
        model.narrow = initial_narrow
        model.user_dict = user_dict  # needed for pm_with email lookups
        assert not model.set_narrow(**good_args)
        assert model.narrow != initial_narrow
        assert model.narrow == narrow
        # FIXME: Add assert for recipients being updated (other tests too?)
@pytest.mark.parametrize("narrow, index, current_ids", [
([], {
"all_msg_ids": {0, 1}
}, {0, 1}),
([['stream', 'FOO']], {
"stream_msg_ids_by_stream_id": {
1: {0, 1}
}
}, {0, 1}),
([['stream', 'FOO'],
['topic', 'BOO']], {
'topic_msg_ids': {
1: {
'BOO': {0, 1}
}
}
}, {0, 1}),
([['stream', 'FOO'], # Covers one empty-set case
['topic', 'BOOBOO']], {
'topic_msg_ids': {
1: {
'BOO': {0, 1}
}
}
}, set()),
([['is', 'private']], {
'private_msg_ids': {0, 1}
}, {0, 1}),
([['pm_with', 'FOO@zulip.com']], {
'private_msg_ids_by_user_ids': {
frozenset({1, 2}): {0, 1}
}
}, {0, 1}),
([['pm_with', 'FOO@zulip.com']], { # Covers recipient empty-set case
'private_msg_ids_by_user_ids': {
frozenset({1, 3}): {0, 1} # NOTE {1,3} not {1,2}
}
}, set()),
([['search', 'FOO']], {
'search': {0, 1}
}, {0, 1}),
([['is', 'starred']], {
'starred_msg_ids': {0, 1}
}, {0, 1}),
([['stream', 'FOO'], ['search', 'FOO']], {
"stream_msg_ids_by_stream_id": {
1: {0, 1, 2} # NOTE Should not be returned
},
'search': {0, 1},
}, {0, 1}),
([['is', 'mentioned']], {
'mentioned_msg_ids': {0, 1}
}, {0, 1}),
])
def test_get_message_ids_in_current_narrow(self, mocker, model,
narrow, index, current_ids):
model.recipients = frozenset({1, 2})
model.stream_id = 1
model.narrow = narrow
model.index = index
assert current_ids == model.get_message_ids_in_current_narrow()
@pytest.mark.parametrize("response, expected_index, return_value", [
({'result': 'success', 'topics': [{'name': 'Foo'}, {'name': 'Boo'}]},
{23: ['Foo', 'Boo']}, ''),
({'result': 'success', 'topics': []},
{23: []}, ''),
({'result': 'failure', 'msg': 'Some Error', 'topics': []},
{23: []}, 'Some Error')
])
def test_get_topics_in_streams(self, mocker, response, model, return_value,
expected_index) -> None:
self.client.get_stream_topics = mocker.Mock(return_value=response)
result = model.get_topics_in_stream([23])
self.client.get_stream_topics.assert_called_once_with(23)
assert model.index['topics'] == expected_index
assert result == return_value
    @pytest.mark.parametrize("user_key", ['user_id', 'id'])
    @pytest.mark.parametrize("msg_id, existing_reactions, expected_method", [
        (5, [], 'POST'),
        (5, [dict(user='me', emoji_code='1f44d')], 'DELETE'),
        (5, [dict(user='not me', emoji_code='1f44d')], 'POST'),
        (5, [dict(user='me', emoji_code='1f614')], 'POST'),
        (5, [dict(user='not me', emoji_code='1f614')], 'POST'),
    ])
    def test_react_to_message_with_thumbs_up(self, model,
                                             user_key,
                                             msg_id,
                                             existing_reactions,
                                             expected_method):
        """Reacting toggles: an existing own thumbs-up is removed (DELETE),
        otherwise a new reaction is added (POST)."""
        # Expand shorthand 'me'/'not me' into real user-id dicts; the server
        # may use either 'user_id' or 'id' as the key, so both are tested.
        full_existing_reactions = [
            dict(er, user={user_key: (model.user_id if er['user'] == 'me'
                                      else model.user_id + 1)})  # non-match
            for er in existing_reactions
        ]
        message = dict(
            id=msg_id,
            reactions=full_existing_reactions)
        reaction_spec = dict(
            emoji_name='thumbs_up',
            reaction_type='unicode_emoji',
            emoji_code='1f44d',
            message_id=str(msg_id))

        model.react_to_message(message, 'thumbs_up')

        if expected_method == 'POST':
            model.client.add_reaction.assert_called_once_with(reaction_spec)
            model.client.delete_reaction.assert_not_called()
        elif expected_method == 'DELETE':
            model.client.remove_reaction.assert_called_once_with(reaction_spec)
            model.client.add_reaction.assert_not_called()
def test_react_to_message_for_not_thumbs_up(self, model):
with pytest.raises(AssertionError):
model.react_to_message(dict(), 'x')
@pytest.mark.parametrize('response, return_value', [
({'result': 'success'}, True),
({'result': 'some_failure'}, False),
])
def test_send_private_message(self, mocker, model,
response, return_value,
content="hi!",
recipients="notification-bot@zulip.com"):
self.client.send_message = mocker.Mock(return_value=response)
result = model.send_private_message(recipients, content)
req = dict(type='private', to=recipients, content=content)
self.client.send_message.assert_called_once_with(req)
assert result == return_value
@pytest.mark.parametrize('response, return_value', [
({'result': 'success'}, True),
({'result': 'some_failure'}, False),
])
def test_send_stream_message(self, mocker, model,
response, return_value,
content="hi!",
stream="foo", topic="bar"):
self.client.send_message = mocker.Mock(return_value=response)
result = model.send_stream_message(stream, topic, content)
req = dict(type='stream', to=stream, subject=topic, content=content)
self.client.send_message.assert_called_once_with(req)
assert result == return_value
@pytest.mark.parametrize('response, return_value', [
({'result': 'success'}, True),
({'result': 'some_failure'}, False),
])
def test_update_private_message(self, mocker, model,
response, return_value,
content="hi!",
msg_id=1):
self.client.update_message = mocker.Mock(return_value=response)
result = model.update_private_message(msg_id, content)
req = dict(message_id=msg_id, content=content)
self.client.update_message.assert_called_once_with(req)
assert result == return_value
@pytest.mark.parametrize('response, return_value', [
({'result': 'success'}, True),
({'result': 'some_failure'}, False),
])
def test_update_stream_message(self, mocker, model,
response, return_value,
content="hi!",
subject='Hello',
msg_id=1):
self.client.update_message = mocker.Mock(return_value=response)
result = model.update_stream_message(subject, msg_id, content)
req = dict(subject=subject, propagate_mode="change_one",
message_id=msg_id, content=content)
self.client.update_message.assert_called_once_with(req)
assert result == return_value
# NOTE: This tests only getting next-unread, not a fixed anchor
    def test_success_get_messages(self, mocker, messages_successful_response,
                                  index_all_messages, initial_data,
                                  num_before=30, num_after=10):
        """The initial get_messages (from __init__) uses the next-unread
        anchor and indexes the response; the pointer is stored only when
        the anchor is a real message id (not the huge sentinel)."""
        self.client.register.return_value = initial_data
        mocker.patch('zulipterminal.model.Model.get_all_users',
                     return_value=[])
        mocker.patch('zulipterminal.model.Model.'
                     '_stream_info_from_subscriptions',
                     return_value=({}, set(), [], []))
        self.classify_unread_counts = mocker.patch(
            'zulipterminal.model.classify_unread_counts',
            return_value=[])

        # Setup mocks before calling get_messages
        self.client.get_messages.return_value = messages_successful_response
        mocker.patch('zulipterminal.model.index_messages',
                     return_value=index_all_messages)
        model = Model(self.controller)
        request = {
            'anchor': 0,
            'num_before': num_before,
            'num_after': num_after,
            'apply_markdown': True,
            'use_first_unread_anchor': True,
            'client_gravatar': True,
            'narrow': json.dumps(model.narrow),
        }
        (model.client.get_messages.
         assert_called_once_with(message_filters=request))
        assert model.index == index_all_messages
        anchor = messages_successful_response['anchor']
        # Sentinel anchor (10**16) means "no unread"; don't store a pointer.
        if anchor < 10000000000000000:
            assert model.index['pointer'][str(model.narrow)] == anchor
        assert model.found_newest is True
def test_get_message_false_first_anchor(
self, mocker, messages_successful_response, index_all_messages,
initial_data, num_before=30, num_after=10
):
# TEST FOR get_messages() with first_anchor=False
# and anchor=0
# Initialize Model
self.client.register.return_value = initial_data
mocker.patch('zulipterminal.model.Model.get_all_users',
return_value=[])
mocker.patch('zulipterminal.model.Model.'
'_stream_info_from_subscriptions',
return_value=({}, set(), [], []))
self.classify_unread_counts = mocker.patch(
'zulipterminal.model.classify_unread_counts',
return_value=[])
# Setup mocks before calling get_messages
messages_successful_response['anchor'] = 0
self.client.get_messages.return_value = messages_successful_response
mocker.patch('zulipterminal.model.index_messages',
return_value=index_all_messages)
model = Model(self.controller)
model.get_messages(num_before=num_before, num_after=num_after,
anchor=0)
self.client.get_messages.return_value = messages_successful_response
# anchor should have remained the same
anchor = messages_successful_response['anchor']
assert model.index['pointer'][str(model.narrow)] == 0
# TEST `query_range` < no of messages received
model.found_newest = False # RESET model.found_newest value
model.get_messages(num_after=0, num_before=0, anchor=0)
assert model.found_newest is False
# FIXME This only tests the case where the get_messages is in __init__
    def test_fail_get_messages(self, mocker, error_response,
                               initial_data, num_before=30, num_after=10):
        """A failed initial fetch surfaces as ServerConnectionFailure."""
        # Initialize Model
        self.client.register.return_value = initial_data
        mocker.patch('zulipterminal.model.Model.get_all_users',
                     return_value=[])
        mocker.patch('zulipterminal.model.Model.'
                     '_stream_info_from_subscriptions',
                     return_value=({}, set(), [], []))
        self.classify_unread_counts = mocker.patch(
            'zulipterminal.model.classify_unread_counts',
            return_value=[])

        # Setup mock before calling get_messages
        # FIXME This has no influence on the result
        # self.client.do_api_query.return_value = error_response

        with pytest.raises(ServerConnectionFailure):
            model = Model(self.controller)
    @pytest.mark.parametrize('initial_muted_streams, value', [
        ({315}, True),
        ({205, 315}, False),
        (set(), True),
        ({205}, False),
    ], ids=['muting_205', 'unmuting_205', 'first_muted_205',
            'last_unmuted_205'])
    def test_toggle_stream_muted_status(self, mocker, model,
                                        initial_muted_streams, value):
        """Toggling sends is_muted=True when 205 was unmuted, else False."""
        model.muted_streams = initial_muted_streams
        model.client.update_subscription_settings.return_value = {
            'result': "success"
        }
        model.toggle_stream_muted_status(205)
        request = [{
            'stream_id': 205,
            'property': 'is_muted',
            'value': value
        }]
        (model.client.update_subscription_settings
         .assert_called_once_with(request))
@pytest.mark.parametrize('flags_before, expected_operator', [
([], 'add'),
(['starred'], 'remove'),
(['read'], 'add'),
(['read', 'starred'], 'remove'),
(['starred', 'read'], 'remove'),
])
def test_toggle_message_star_status(self, mocker, model, flags_before,
expected_operator):
mocker.patch('zulip.Client')
message = {
'id': 99,
'flags': flags_before,
}
model.toggle_message_star_status(message)
request = {
'flag': 'starred',
'messages': [99],
'op': expected_operator
}
model.client.update_message_flags.assert_called_once_with(request)
def test_mark_message_ids_as_read(self, model, mocker: Any) -> None:
mock_api_query = mocker.patch('zulipterminal.core.Controller'
'.client.update_message_flags')
model.mark_message_ids_as_read([1, 2])
mock_api_query.assert_called_once_with(
{'flag': 'read', 'messages': [1, 2], 'op': 'add'},
)
def test_mark_message_ids_as_read_empty_msg_list(self, model) -> None:
assert model.mark_message_ids_as_read([]) is None
def test__update_initial_data(self, model, initial_data):
assert model.initial_data == initial_data
    def test__update_initial_data_raises_exception(self, mocker, initial_data):
        """Exceptions from register() propagate out of _update_initial_data."""
        # Initialize Model
        mocker.patch('zulipterminal.model.Model.get_messages', return_value='')
        mocker.patch('zulipterminal.model.Model.get_all_users',
                     return_value=[])
        mocker.patch('zulipterminal.model.Model.'
                     '_stream_info_from_subscriptions',
                     return_value=({}, set(), [], []))
        self.classify_unread_counts = mocker.patch(
            'zulipterminal.model.classify_unread_counts',
            return_value=[])

        # Setup mocks before calling get_messages
        self.client.register.return_value = initial_data
        self.client.get_members.return_value = {
            'members': initial_data['realm_users']}
        model = Model(self.controller)

        # Test if raises Exception
        self.client.register.side_effect = Exception()
        with pytest.raises(Exception):
            model._update_initial_data()
    def test__group_info_from_realm_user_groups(self, model,
                                                user_groups_fixture):
        """Groups are cached by id and their names returned (sorted order
        matches the fixture's group names)."""
        user_group_names = model._group_info_from_realm_user_groups(
            user_groups_fixture)
        assert model.user_group_by_id == {
            group['id']: {'members': group['members'],
                          'name': group['name'],
                          'description': group['description']}
            for group in user_groups_fixture
        }
        assert user_group_names == ['Group 1', 'Group 2', 'Group 3', 'Group 4']
    def test_get_all_users(self, mocker, initial_data, user_list, user_dict,
                           user_id):
        """On construction the model builds user_dict and users from the
        register() payload (fixtures define the expected shapes)."""
        mocker.patch('zulipterminal.model.Model.get_messages', return_value='')
        self.client.register.return_value = initial_data
        mocker.patch('zulipterminal.model.Model.'
                     '_stream_info_from_subscriptions',
                     return_value=({}, set(), [], []))
        self.classify_unread_counts = mocker.patch(
            'zulipterminal.model.classify_unread_counts',
            return_value=[])
        model = Model(self.controller)
        assert model.user_dict == user_dict
        assert model.users == user_list
    @pytest.mark.parametrize('muted', powerset([1, 2, 99, 1000]))
    def test__stream_info_from_subscriptions(self, initial_data, streams,
                                             muted):
        """Subscriptions split into (by-id dict, muted set, pinned,
        unpinned); in_home_view=False marks a stream muted."""
        subs = [dict(entry, in_home_view=entry['stream_id'] not in muted)
                for entry in initial_data['subscriptions']]
        by_id, muted_streams, pinned, unpinned = (
            Model._stream_info_from_subscriptions(subs))
        assert len(by_id)
        assert all(msg_id == msg['stream_id'] for msg_id, msg in by_id.items())
        assert muted_streams == muted
        assert pinned == []  # FIXME generalize/parametrize
        assert unpinned == streams  # FIXME generalize/parametrize
def test__handle_message_event_with_Falsey_log(self, mocker,
model, message_fixture):
model.found_newest = True
mocker.patch('zulipterminal.model.Model._update_topic_index')
index_msg = mocker.patch('zulipterminal.model.index_messages',
return_value={})
model.msg_list = mocker.Mock(log=[])
create_msg_box_list = mocker.patch('zulipterminal.model.'
'create_msg_box_list',
return_value=["msg_w"])
model.notify_user = mocker.Mock()
event = {'message': message_fixture}
model._handle_message_event(event)
assert len(model.msg_list.log) == 1 # Added "msg_w" element
model.notify_user.assert_called_once_with(event['message'])
(create_msg_box_list.
assert_called_once_with(model, [message_fixture['id']],
last_message=None))
def test__handle_message_event_with_valid_log(self, mocker,
model, message_fixture):
model.found_newest = True
mocker.patch('zulipterminal.model.Model._update_topic_index')
index_msg = mocker.patch('zulipterminal.model.index_messages',
return_value={})
model.msg_list = mocker.Mock(log=[mocker.Mock()])
create_msg_box_list = mocker.patch('zulipterminal.model.'
'create_msg_box_list',
return_value=["msg_w"])
model.notify_user = mocker.Mock()
event = {'message': message_fixture}
model._handle_message_event(event)
assert len(model.msg_list.log) == 2 # Added "msg_w" element
model.notify_user.assert_called_once_with(event['message'])
# NOTE: So we expect the first element *was* the last_message parameter
expected_last_msg = model.msg_list.log[0].original_widget.message
(create_msg_box_list.
assert_called_once_with(model, [message_fixture['id']],
last_message=expected_last_msg))
def test__handle_message_event_with_flags(self, mocker,
model, message_fixture):
model.found_newest = True
mocker.patch('zulipterminal.model.Model._update_topic_index')
index_msg = mocker.patch('zulipterminal.model.index_messages',
return_value={})
model.msg_list = mocker.Mock()
create_msg_box_list = mocker.patch('zulipterminal.model.'
'create_msg_box_list',
return_value=["msg_w"])
model.notify_user = mocker.Mock()
model.msg_list.log = [mocker.Mock()]
set_count = mocker.patch('zulipterminal.model.set_count')
# Test event with flags
event = {'message': message_fixture, 'flags': ['read', 'mentioned']}
model._handle_message_event(event)
# set count not called since 'read' flag present.
set_count.assert_not_called()
# Test event without flags
model.notify_user.assert_called_once_with(event['message'])
model.msg_list.log = [mocker.Mock()]
event = {'message': message_fixture, 'flags': []}
model._handle_message_event(event)
# set count called since the message is unread.
set_count.assert_called_once_with([event['message']['id']],
self.controller, 1)
@pytest.mark.parametrize('response, narrow, recipients, log', [
({'type': 'stream', 'stream_id': 1, 'subject': 'FOO',
'id': 1}, [], frozenset(), ['msg_w']),
({'type': 'private', 'id': 1},
[['is', 'private']], frozenset(), ['msg_w']),
({'type': 'stream', 'id': 1, 'stream_id': 1, 'subject': 'FOO',
'display_recipient': 'a'},
[['stream', 'a']], frozenset(), ['msg_w']),
({'type': 'stream', 'id': 1, 'stream_id': 1, 'subject': 'b',
'display_recipient': 'a'},
[['stream', 'a'], ['topic', 'b']],
frozenset(), ['msg_w']),
({'type': 'stream', 'id': 1, 'stream_id': 1, 'subject': 'b',
'display_recipient': 'a'},
[['stream', 'c'], ['topic', 'b']],
frozenset(), []),
({'type': 'private', 'id': 1,
'display_recipient': [{'id': 5827}, {'id': 5}]},
[['pm_with', 'notification-bot@zulip.com']],
frozenset({5827, 5}), ['msg_w']),
({'type': 'private', 'id': 1},
[['is', 'search']],
frozenset(), []),
({'type': 'private', 'id': 1,
'display_recipient': [{'id': 5827}, {'id': 3212}]},
[['pm_with', 'notification-bot@zulip.com']],
frozenset({5827, 5}), []),
({'type': 'stream', 'id': 1, 'stream_id': 1, 'subject': 'c',
'display_recipient': 'a', 'flags': ['mentioned']},
[['is', 'mentioned']], frozenset(), ['msg_w']),
], ids=['stream_to_all_messages', 'private_to_all_private',
'stream_to_stream', 'stream_to_topic',
'stream_to_different_stream_same_topic',
'user_pm_x_appears_in_narrow_with_x', 'search',
'user_pm_x_does_not_appear_in_narrow_without_x',
'mentioned_msg_in_mentioned_msg_narrow'])
def test__handle_message_event(self, mocker, user_profile, response,
narrow, recipients, model, log):
model.found_newest = True
mocker.patch('zulipterminal.model.Model._update_topic_index')
index_msg = mocker.patch('zulipterminal.model.index_messages',
return_value={})
create_msg_box_list = mocker.patch('zulipterminal.model.'
'create_msg_box_list',
return_value=["msg_w"])
set_count = mocker.patch('zulipterminal.model.set_count')
model.msg_list = mocker.Mock(log=[])
model.notify_user = mocker.Mock()
model.narrow = narrow
model.recipients = recipients
model.user_id = user_profile['user_id']
event = {'message': response, 'flags': response['flags']
if 'flags' in response else []}
model._handle_message_event(event)
assert model.msg_list.log == log
set_count.assert_called_once_with([response['id']], self.controller, 1)
model.found_newest = False
model.notify_user.assert_called_once_with(response)
model._handle_message_event(event)
# LOG REMAINS THE SAME IF UPDATE IS FALSE
assert model.msg_list.log == log
@pytest.mark.parametrize(['topic_name', 'topic_order_intial',
'topic_order_final'], [
('TOPIC3', ['TOPIC2', 'TOPIC3', 'TOPIC1'],
['TOPIC3', 'TOPIC2', 'TOPIC1']),
('TOPIC1', ['TOPIC1', 'TOPIC2', 'TOPIC3'],
['TOPIC1', 'TOPIC2', 'TOPIC3']),
('TOPIC4', ['TOPIC1', 'TOPIC2', 'TOPIC3'],
['TOPIC4', 'TOPIC1', 'TOPIC2', 'TOPIC3']),
('TOPIC1', [], ['TOPIC1'])
], ids=['reorder_topic3', 'topic1_discussion_continues', 'new_topic4',
'first_topic_1'])
def test__update_topic_index(self, topic_name, topic_order_intial,
topic_order_final, model):
model.index = {
'topics': {
86: topic_order_intial,
}
}
model._update_topic_index(86, topic_name)
assert model.index['topics'][86] == topic_order_final
# TODO: Ideally message_fixture would use standardized ids?
@pytest.mark.parametrize(['user_id', 'vary_each_msg', 'stream_setting',
'types_when_notify_called'], [
(5140, {'flags': ['mentioned', 'wildcard_mentioned']}, True,
[]), # message_fixture sender_id is 5140
(5179, {'flags': ['mentioned']}, False,
['stream', 'private']),
(5179, {'flags': ['wildcard_mentioned']}, False,
['stream', 'private']),
(5179, {'flags': []}, True,
['stream']),
(5179, {'flags': []}, False,
['private']),
], ids=[
'not_notified_since_self_message',
'notified_stream_and_private_since_directly_mentioned',
'notified_stream_and_private_since_wildcard_mentioned',
'notified_stream_since_stream_has_desktop_notifications',
'notified_private_since_private_message',
])
def test_notify_users_calling_msg_type(self, mocker, model,
message_fixture,
user_id,
vary_each_msg,
stream_setting,
types_when_notify_called):
message_fixture.update(vary_each_msg)
model.user_id = user_id
if 'stream_id' in message_fixture:
model.stream_dict.update(
{message_fixture['stream_id']:
{'desktop_notifications': stream_setting}}
)
notify = mocker.patch('zulipterminal.model.notify')
model.notify_user(message_fixture)
if message_fixture['type'] in types_when_notify_called:
who = message_fixture['type']
if who == 'stream':
target = 'PTEST -> Test'
elif who == 'private':
target = 'you'
if len(message_fixture['display_recipient']) > 2:
target += ', Bar Bar'
title = 'Test Organization Name:\nFoo Foo (to {})'.format(target)
# TODO: Test message content too?
notify.assert_called_once_with(title, mocker.ANY)
else:
notify.assert_not_called
@pytest.mark.parametrize('notify_enabled, is_notify_called', [
(True, True),
(False, False),
])
def test_notify_users_enabled(self, mocker, model, message_fixture,
notify_enabled, is_notify_called):
message_fixture.update({'sender_id': 2, 'flags': ['mentioned']})
model.controller.notify_enabled = notify_enabled
model.user_id = 1
notify = mocker.patch('zulipterminal.model.notify')
model.notify_user(message_fixture)
assert notify.called == is_notify_called
    @pytest.mark.parametrize('response, update_call_count, new_index', [
        ({  # Only subject of 1 message is updated.
            'message_id': 1,
            'subject': 'new subject',
            'message_ids': [1],
        }, 1, {
            'messages': {
                1: {
                    'id': 1,
                    'content': 'old content',
                    'subject': 'new subject'
                },
                2: {
                    'id': 2,
                    'content': 'old content',
                    'subject': 'old subject'
                }},
            'edited_messages': {1}
        }),
        ({  # Subject of 2 messages is updated
            'message_id': 1,
            'subject': 'new subject',
            'message_ids': [1, 2],
        }, 2, {
            'messages': {
                1: {
                    'id': 1,
                    'content': 'old content',
                    'subject': 'new subject'
                },
                2: {
                    'id': 2,
                    'content': 'old content',
                    'subject': 'new subject'
                }},
            'edited_messages': {1}
        }),
        ({  # Message content is updated
            'message_id': 1,
            'rendered_content': '<p>new content</p>',
        }, 1, {
            'messages': {
                1: {
                    'id': 1,
                    'content': '<p>new content</p>',
                    'subject': 'old subject'
                },
                2: {
                    'id': 2,
                    'content': 'old content',
                    'subject': 'old subject'
                }},
            'edited_messages': {1}
        }),
        ({  # Both message content and subject is updated.
            'message_id': 1,
            'rendered_content': '<p>new content</p>',
            'subject': 'new subject',
            'message_ids': [1],
        }, 2, {
            'messages': {
                1: {
                    'id': 1,
                    'content': '<p>new content</p>',
                    'subject': 'new subject'
                },
                2: {
                    'id': 2,
                    'content': 'old content',
                    'subject': 'old subject'
                }},
            'edited_messages': {1}
        }),
        ({  # Some new type of update which we don't handle yet.
            'message_id': 1,
            'foo': 'boo',
        }, 0, {
            'messages': {
                1: {
                    'id': 1,
                    'content': 'old content',
                    'subject': 'old subject'
                },
                2: {
                    'id': 2,
                    'content': 'old content',
                    'subject': 'old subject'
                }},
            'edited_messages': {1}
        }),
        ({  # message_id not present in index.
            'message_id': 3,
            'rendered_content': '<p>new content</p>',
            'subject': 'new subject',
            'message_ids': [3],
        }, 0, {
            'messages': {
                1: {
                    'id': 1,
                    'content': 'old content',
                    'subject': 'old subject'
                },
                2: {
                    'id': 2,
                    'content': 'old content',
                    'subject': 'old subject'
                }},
            'edited_messages': set()
        }),
    ])
    def test__handle_update_message_event(self, mocker, model,
                                          response, new_index,
                                          update_call_count):
        """Edit events patch content/subject in the index and re-render
        each affected message; unknown or unindexed ids change nothing."""
        # Two indexed messages with identical, un-edited starting state.
        model.index = {
            'messages': {
                message_id: {
                    'id': message_id,
                    'content': 'old content',
                    'subject': 'old subject',
                } for message_id in [1, 2]
            },
            'edited_messages': set()
        }
        mocker.patch('zulipterminal.model.Model._update_rendered_view')

        model._handle_update_message_event(response)

        assert model.index == new_index
        assert model._update_rendered_view.call_count == update_call_count
@pytest.mark.parametrize('subject, narrow, new_log_len', [
('foo', [['stream', 'boo'], ['topic', 'foo']], 2),
('foo', [['stream', 'boo'], ['topic', 'not foo']], 1),
('foo', [], 2),
], ids=[
'msgbox_updated_in_topic_narrow',
'msgbox_removed_due_to_topic_narrow_mismatch',
'msgbox_updated_in_all_messages_narrow',
])
def test__update_rendered_view(self, mocker, model, subject, narrow,
new_log_len, msg_id=1):
msg_w = mocker.Mock()
other_msg_w = mocker.Mock()
msg_w.original_widget.message = {'id': msg_id, 'subject': subject}
model.narrow = narrow
other_msg_w.original_widget.message = {'id': 2}
model.msg_list = mocker.Mock(log=[msg_w, other_msg_w])
# New msg widget generated after updating index.
new_msg_w = mocker.Mock()
cmbl = mocker.patch('zulipterminal.model.create_msg_box_list',
return_value=[new_msg_w])
model._update_rendered_view(msg_id)
# If there are 2 msgs and first one is updated, next one is updated too
if new_log_len == 2:
other_msg_w = new_msg_w
assert model.msg_list.log == [new_msg_w, other_msg_w][-new_log_len:]
assert model.controller.update_screen.called
@pytest.mark.parametrize('subject, narrow, narrow_changed', [
('foo', [['stream', 'boo'], ['topic', 'foo']], False),
('foo', [['stream', 'boo'], ['topic', 'not foo']], True),
('foo', [], False),
], ids=[
'same_topic_narrow',
'previous_topic_narrow_empty_so_change_narrow',
'same_all_messages_narrow',
])
def test__update_rendered_view_change_narrow(self, mocker, model, subject,
narrow, narrow_changed,
msg_id=1):
msg_w = mocker.Mock()
other_msg_w = mocker.Mock()
msg_w.original_widget.message = {'id': msg_id, 'subject': subject}
model.narrow = narrow
model.msg_list = mocker.Mock(log=[msg_w])
# New msg widget generated after updating index.
new_msg_w = mocker.Mock()
cmbl = mocker.patch('zulipterminal.model.create_msg_box_list',
return_value=[new_msg_w])
model._update_rendered_view(msg_id)
assert model.controller.narrow_to_topic.called == narrow_changed
assert model.controller.update_screen.called
    @pytest.mark.parametrize('response, index', [
        ({'emoji_code': '1f44d',
          'id': 2,
          'user': {
              'email': 'Foo@zulip.com',
              'user_id': 5140,
              'full_name': 'Foo Boo'
          },
          'reaction_type': 'unicode_emoji',
          'message_id': 1,
          'emoji_name': 'thumbs_up',
          'type': 'reaction',
          'op': 'add'
          }, {
            'messages': {
                1: {
                    'id': 1,
                    'content': 'Boo is Foo',
                    'reactions': [
                        {
                            'user': {
                                'email': 'Foo@zulip.com',
                                'user_id': 1,
                                'full_name': 'Foo Boo'
                            },
                            'reaction_type': 'unicode_emoji',
                            'emoji_code': '1232',
                            'emoji_name': 'thumbs_up'
                        }
                    ],
                },
                2: {
                    'id': 2,
                    'content': "Boo is not Foo",
                    'reactions': [],
                }
            }
        })])
    def test__handle_reaction_event(self, mocker, model, response, index):
        """An 'add' reaction event appends to the indexed message's
        reactions and refreshes the screen; unindexed messages are ignored."""
        model.index = index
        model.msg_list = mocker.Mock()
        mock_msg = mocker.Mock()
        another_msg = mocker.Mock()
        model.msg_list.log = [mock_msg, another_msg]
        mock_msg.original_widget.message = index['messages'][1]
        another_msg.original_widget.message = index['messages'][2]
        mocker.patch('zulipterminal.model.create_msg_box_list',
                     return_value=[mock_msg])

        model._handle_reaction_event(response)

        # The new reaction is appended after the pre-existing one.
        update_emoji = model.index['messages'][1]['reactions'][1]['emoji_code']
        assert update_emoji == response['emoji_code']
        self.controller.update_screen.assert_called_once_with()

        # TEST FOR FALSE CASES
        model.index['messages'][1] = {}
        model._handle_reaction_event(response)
        # If there was no message earlier then don't update
        assert model.index['messages'][1] == {}
    @pytest.mark.parametrize('response, index', [
        ({'emoji_code': '1f44d',
          'id': 2,
          'user': {
              'email': 'Foo@zulip.com',
              'user_id': 5140,
              'full_name': 'Foo Boo'
          },
          'reaction_type': 'unicode_emoji',
          'message_id': 1,
          'emoji_name': 'thumbs_up',
          'type': 'reaction',
          'op': 'add'
          }, {
            'messages': {
                1: {
                    'id': 1,
                    'content': 'Boo is Foo',
                    'reactions': [
                        {
                            'user': {
                                'email': 'Foo@zulip.com',
                                'user_id': 1,
                                'full_name': 'Foo Boo'
                            },
                            'reaction_type': 'unicode_emoji',
                            'emoji_code': '1232',
                            'emoji_name': 'thumbs_up'
                        }
                    ],
                },
                2: {
                    'id': 2,
                    'content': "Boo is not Foo",
                    'reactions': [],
                }
            }
        })])
    def test__handle_reaction_event_remove_reaction(self, mocker, model,
                                                    response, index):
        """A 'remove' reaction event leaves only non-matching reactions;
        here the existing one (different user id) remains."""
        model.index = index
        mock_msg = mocker.Mock()
        another_msg = mocker.Mock()
        model.msg_list = mocker.Mock(log=[mock_msg, another_msg])
        mock_msg.original_widget.message = index['messages'][1]
        another_msg.original_widget.message = index['messages'][2]
        mocker.patch('zulipterminal.model.create_msg_box_list',
                     return_value=[mock_msg])

        # Test removing of reaction.
        response['op'] = 'remove'
        model._handle_reaction_event(response)
        assert len(model.index['messages'][1]['reactions']) == 1
def test_update_star_status_no_index(self, mocker, model):
model.index = dict(messages={}) # Not indexed
event = dict(messages=[1], flag='starred', all=False, operation='add')
mocker.patch('zulipterminal.model.Model._update_rendered_view')
set_count = mocker.patch('zulipterminal.model.set_count')
model._handle_update_message_flags_event(event)
assert model.index == dict(messages={})
model._update_rendered_view.assert_not_called()
set_count.assert_not_called()
    def test_update_star_status_invalid_operation(self, mocker, model):
        """An unrecognized flag operation raises without side effects."""
        model.index = dict(messages={1: {'flags': None}})  # Minimal
        event = {
            'messages': [1],
            'type': 'update_message_flags',
            'flag': 'starred',
            'operation': 'OTHER',  # not 'add' or 'remove'
            'all': False,
        }
        mocker.patch('zulipterminal.model.Model._update_rendered_view')
        set_count = mocker.patch('zulipterminal.model.set_count')
        with pytest.raises(RuntimeError):
            model._handle_update_message_flags_event(event)
        model._update_rendered_view.assert_not_called()
        set_count.assert_not_called()
    @pytest.mark.parametrize('event_message_ids, indexed_ids', [
        ([1], [1]),
        ([1, 2], [1]),
        ([1, 2], [1, 2]),
        ([1], [1, 2]),
        ([], [1, 2]),
        ([1, 2], []),
    ])
    @pytest.mark.parametrize('event_op, flags_before, flags_after', [
        ('add', [], ['starred']),
        ('add', ['read'], ['read', 'starred']),
        ('add', ['starred'], ['starred']),
        ('add', ['read', 'starred'], ['read', 'starred']),
        ('remove', [], []),
        ('remove', ['read'], ['read']),
        ('remove', ['starred'], []),
        ('remove', ['read', 'starred'], ['read']),
        ('remove', ['starred', 'read'], ['read']),
    ])
    def test_update_star_status(self, mocker, model, event_op,
                                event_message_ids, indexed_ids,
                                flags_before, flags_after):
        """Star flag events update only indexed messages, re-render each
        changed one, and never touch unread counts."""
        model.index = dict(messages={msg_id: {'flags': flags_before}
                                     for msg_id in indexed_ids})
        event = {
            'messages': event_message_ids,
            'type': 'update_message_flags',
            'flag': 'starred',
            'operation': event_op,
            'all': False,
        }
        mocker.patch('zulipterminal.model.Model._update_rendered_view')
        set_count = mocker.patch('zulipterminal.model.set_count')

        model._handle_update_message_flags_event(event)

        # Only messages both in the event and in the index change.
        changed_ids = set(indexed_ids) & set(event_message_ids)
        for changed_id in changed_ids:
            assert model.index['messages'][changed_id]['flags'] == flags_after
        (model._update_rendered_view.
         assert_has_calls([mocker.call(changed_id)
                           for changed_id in changed_ids]))
        for unchanged_id in (set(indexed_ids) - set(event_message_ids)):
            assert (model.index['messages'][unchanged_id]['flags']
                    == flags_before)
        set_count.assert_not_called()
    @pytest.mark.parametrize('event_message_ids, indexed_ids', [
        ([1], [1]),
        ([1, 2], [1]),
        ([1, 2], [1, 2]),
        ([1], [1, 2]),
        ([], [1, 2]),
        ([1, 2], []),
    ])
    @pytest.mark.parametrize('event_op, flags_before, flags_after', [
        ('add', [], ['read']),
        ('add', ['read'], ['read']),
        ('add', ['starred'], ['starred', 'read']),
        ('add', ['read', 'starred'], ['read', 'starred']),
        ('remove', [], []),
        ('remove', ['read'], ['read']),  # msg cannot be marked 'unread'
        ('remove', ['starred'], ['starred']),
        ('remove', ['starred', 'read'], ['starred', 'read']),
        ('remove', ['read', 'starred'], ['read', 'starred']),
    ])
    def test_update_read_status(self, mocker, model, event_op,
                                event_message_ids, indexed_ids,
                                flags_before, flags_after):
        """Adding 'read' updates flags, re-renders and decrements unread
        counts; 'remove' is a no-op since messages cannot be un-read."""
        model.index = dict(messages={msg_id: {'flags': flags_before}
                                     for msg_id in indexed_ids})
        event = {
            'messages': event_message_ids,
            'type': 'update_message_flags',
            'flag': 'read',
            'operation': event_op,
            'all': False,
        }
        mocker.patch('zulipterminal.model.Model._update_rendered_view')
        set_count = mocker.patch('zulipterminal.model.set_count')
        model._handle_update_message_flags_event(event)
        # Only messages present in both the event and the index change.
        changed_ids = set(indexed_ids) & set(event_message_ids)
        for changed_id in changed_ids:
            assert model.index['messages'][changed_id]['flags'] == flags_after
            if event_op == 'add':
                model._update_rendered_view.assert_has_calls(
                    [mocker.call(changed_id)])
            elif event_op == 'remove':
                model._update_rendered_view.assert_not_called()
        # Untouched indexed messages keep their original flags.
        for unchanged_id in (set(indexed_ids) - set(event_message_ids)):
            assert (model.index['messages'][unchanged_id]['flags']
                    == flags_before)
        if event_op == 'add':
            # NOTE(review): self.controller — presumably an attribute set on
            # the test class by a fixture; confirm it matches model.controller.
            set_count.assert_called_once_with(list(changed_ids),
                                              self.controller, -1)
        elif event_op == 'remove':
            set_count.assert_not_called()
    @pytest.mark.parametrize('narrow, event, called', [
        # Not in PM Narrow
        ([], {}, False),
        # Not in PM Narrow with sender
        (
            [['pm_with', 'iago@zulip.com']],
            {
                'type': 'typing',
                'op': 'start',
                'sender': {
                    'user_id': 4,
                    'email': 'hamlet@zulip.com'
                },
                'recipients': [{
                    'user_id': 4,
                    'email': 'hamlet@zulip.com'
                }, {
                    'user_id': 5,
                    'email': 'iago@zulip.com'
                }],
                'id': 0
            },
            False,
        ),
        # In PM narrow with the sender, OP - 'start'
        (
            [['pm_with', 'hamlet@zulip.com']],
            {
                'type': 'typing',
                'op': 'start',
                'sender': {
                    'user_id': 4,
                    'email': 'hamlet@zulip.com'
                },
                'recipients': [{
                    'user_id': 4,
                    'email': 'hamlet@zulip.com'
                }, {
                    'user_id': 5,
                    'email': 'iago@zulip.com'
                }],
                'id': 0
            },
            True,
        ),
        # OP - 'stop'
        (
            [['pm_with', 'hamlet@zulip.com']],
            {
                'type': 'typing',
                'op': 'stop',
                'sender': {
                    'user_id': 4,
                    'email': 'hamlet@zulip.com'
                },
                'recipients': [{
                    'user_id': 4,
                    'email': 'hamlet@zulip.com'
                }, {
                    'user_id': 5,
                    'email': 'iago@zulip.com'
                }],
                'id': 0
            },
            True,
        )
    ], ids=['not_in_pm_narrow', 'not_in_pm_narrow_with_sender',
            'start', 'stop'])
    def test__handle_typing_event(self, mocker, model,
                                  narrow, event, called):
        """The footer typing notice is set only when the current narrow
        is a PM conversation with the event's sender (both 'start' and
        'stop' ops update the footer)."""
        mocker.patch('zulipterminal.ui.View.set_footer_text')
        model.narrow = narrow
        model.user_dict = {'hamlet@zulip.com': {'full_name': 'hamlet'}}
        model._handle_typing_event(event)
        assert model.controller.view.set_footer_text.called == called
    @pytest.mark.parametrize('event, final_muted_streams, ', [
        (
            {'property': 'in_home_view',
             'stream_id': 19,
             'value': True},
            {15}
        ),
        (
            {'property': 'in_home_view',
             'stream_id': 30,
             'value': False},
            {15, 19, 30}
        )
    ], ids=[
        'remove_19', 'add_30'
    ])
    def test__handle_subscription_event(self, model, mocker, stream_button,
                                        event, final_muted_streams):
        """An 'in_home_view' subscription event toggles the stream in
        model.muted_streams, updates the stream button's muted marker,
        and triggers exactly one screen redraw."""
        model.muted_streams = {15, 19}
        model.controller.view.stream_id_to_button = {
            event['stream_id']: stream_button  # stream id is known
        }
        mark_muted = mocker.patch(
            'zulipterminal.ui_tools.buttons.StreamButton.mark_muted')
        mark_unmuted = mocker.patch(
            'zulipterminal.ui_tools.buttons.StreamButton.mark_unmuted')
        model._handle_subscription_event(event)
        assert model.muted_streams == final_muted_streams
        # value=True means "in home view" i.e. unmuted, and vice versa.
        if event['value']:
            mark_unmuted.assert_called_once_with()
        else:
            mark_muted.assert_called_once_with()
        model.controller.update_screen.assert_called_once_with()
@pytest.mark.parametrize('muted_streams, stream_id, is_muted', [
({1}, 1, True),
({1}, 2, False),
(set(), 1, False),
], ids=['muted_stream', 'unmuted_stream', 'unmuted_stream_nostreamsmuted'])
def test_is_muted_stream(self, muted_streams, stream_id, is_muted,
stream_dict, model):
model.stream_dict = stream_dict
model.muted_streams = muted_streams
assert model.is_muted_stream(stream_id) == is_muted
    @pytest.mark.parametrize('topic, is_muted', [
        ((1, 'stream muted & unmuted topic'), True),
        ((2, 'muted topic'), True),
        ((1, 'muted stream muted topic'), True),
        ((2, 'unmuted topic'), False),
    ])
    def test_is_muted_topic(self, topic, is_muted, stream_dict, model):
        """A topic is muted if its whole stream is muted (stream 1) or if
        the (stream-name, topic) pair appears in model.muted_topics."""
        model.stream_dict = stream_dict
        model.muted_streams = [1]
        # muted_topics pairs stream *names* with topic names.
        model.muted_topics = [
            ['Stream 2', 'muted topic'],
            ['Stream 1', 'muted stream muted topic'],
        ]
        assert model.is_muted_topic(stream_id=topic[0],
                                    topic=topic[1]) == is_muted
| 40.817301 | 79 | 0.530188 |
acde90831cdac43e5f376d3122b96d0c9d5791c9 | 26,681 | py | Python | download/dmTxt2Html.py | DavidMertz/gnosis-web | df5ab0d470a02ed6c51c971f2b40f1dcdc7d7350 | [
"CC0-1.0"
] | null | null | null | download/dmTxt2Html.py | DavidMertz/gnosis-web | df5ab0d470a02ed6c51c971f2b40f1dcdc7d7350 | [
"CC0-1.0"
] | null | null | null | download/dmTxt2Html.py | DavidMertz/gnosis-web | df5ab0d470a02ed6c51c971f2b40f1dcdc7d7350 | [
"CC0-1.0"
] | null | null | null | __oneliner__="Convert ASCII source files for HTML presentation"
__longdoc__="""
This program is not yet particularly smart, and will produce
undefined output (or even traceback) if the source file does
not meet expected format. With time, it may get better about
this.
#------------------- Shell Usage -----------------------#
Usage: python dmTxt2Html.py [options] [filename] (or)
txt2html.cgi [options] [filename]
-h, /h, -?, /?, ?: Show this help screen
-type:<type>: Set conversion type
(see discussion of types)
-REBUILD_DOCS: Generate 'txt2html.txt'
-out:<out>: Output filename (default STDOUT)
-proxy:<mode>: Use proxy element(s) in output
<STDIN>, -, /: Input from STDIN (default)
Proxy modes are: NAVIGATOR, TRAP_LINKS, ALL, NONE.
Elements are "navigation bar" at top of pages and
virtualization of in-page links. Shell default in NONE.
Note: CGI is detected by the absence of arguments;, if
all defaults are wanted, specify STDIN explicitly:
python txt2html.py - < MyArticle.txt > MyArticle.html
-
#-------------------- CGI Usage ------------------------#
Usage: A URL string may be composed manually, but the
normal usage will be to call txt2html.cgi from an
HTML form with the fields: 'source', 'preface,
'type', 'proxy'. 'preface' allows explicit
overriding of HTTP headers in the returned page,
normally as a hidden field. Use with caution (or
don't use at all, the default is sensible).
Example: <form method="get" action="http://gnosis.cx/cgi-bin/txt2html.cgi">
URL: <input type="text" name="source" size=40>
<input type="submit" name="go" value="Display!"></form>
------------------------------------------------------------------------
Expected input format for [HTML]
Source HTML is presented unmodified except for the inclusion
of the Txt2HTML proxy at the top of each page.
Expected input format for [PYTHON]
Source Python code is marked up with syntax highlighting, but
no other HTML elements are introduced (no headers, no bold, no
URLs, etc)
Expected input format for [SMART_ASCII]
#--- Paragraph rules: ---#
- Title occurs on first line of document, unindented and in
all caps.
- Subtitle occurs on second line, unindented and in mixed
case.
- Name, affiliation, date occur, unindented and in mixed
case, on lines 4-6.
- Section headings are preceded by two blank lines,
unindented, in all caps, followed by one line of 72
dashes and one blank line.
- Regular text paragraphs are block style, and are indented
two spaces.
- Block quotations are indented four spaces, rather than
the two of original text.
- Code samples are indented six spaces (with internal
indentation of code lines in the proper relative
position).
- Code samples may begin with a line indicating a title for
that block. If present, this title is indented the same
six spaces as the rest of the block, and begins and ends
with a pound sign ('#'). Dashes are used to fill space
within the title for ASCII asthetics.
-
#--- Character rules: ---#
- All character markup has the pattern:
whitespace-symbol-words(s)-symbol-whitespace
Examples are given, and this can be searched for
programmatically. The use of character markup applies
*only* to text paragraphs, *not* to code samples!
- Asterisks are used for an inflectional emphasis. For
example, "All good boys *deserve* fudge." This would
typically be indicated typographically with boldface or
italics.
- Underscores are used for book/journal citation. For
example, "Knuth's _Art of Computer Programming_ is
essential." This would typically be indicated
typographically with italics or underline.
- Single-stroke is used to indicate filenames and function
names. For example, "Every C program has a 'main()'
function." This might be indicated typographically by a
fixed font, by boldface, or simply by single-quotes.
- Braces are used to indicate a module, package or library.
For example, "The [cre] module will replace [re] in
Python 1.6." This will probably be indicated
typographically as a fixed font.
- Double-stroke is used as either inline quotation or scare
quotes. For example, "It may not be as "easy" as
suggested." In either case, typographic quotes are
probably the best format; italics would make some sense
also.
- Parenthesis are used, and should be preserved as is.
- Angle brackets and curly brackets have no special meaning
yet. I may choose to use those if there is something I
think the above forms do not capture.
- Em-dashes, diacritics, ligatures, and typographic
quotations are not available, and standard ASCII
approximations are used.
-
#--- Miscellany: ---#
- URL's are automatically transformed into a hotlink.
Basically, anything that starts with 'http://', 'ftp://',
'file://' or 'gopher://' looks like a URL to the program.
"""
__doc__=__oneliner__+__longdoc__
__comments__="""
This script utilizes the services of the Marc-Andre Lemburg's Python
Highlighter for HTML (v0.5+) [py2html]. [py2html] in turn relies on
Just van Rossum's [PyFontify] (v.0.3.1+) If these are not present,
Txt2HTML hopes to degrade gracefully, but will not provide syntax
highlighting for Python source code.
"""
__author__=["David Mertz (mertz@gnosis.cx)",]
__copyright__="""
This file is released to the public domain. I (dqm) would
appreciate it if you choose to keep derived works under terms
that promote freedom, but obviously am giving up any rights
to compel such.
"""
__version__="version 0.2 (August 2000)"
#-- import stuff, or at least try
import sys, re, string, time
from urllib import urlopen
from cStringIO import *
# [py2html] is optional: syntax highlighting degrades gracefully without
# it.  It must NOT also appear in the unconditional import above, or a
# missing py2html would crash the whole module and defeat this fallback.
try:
    import py2html
    py_formatter = 1
except ImportError:
    # Catch only ImportError; a bare except would also swallow
    # KeyboardInterrupt/SystemExit.
    py_formatter = 0
#-- Define some HTML boilerplate
cgi_home = "http://gnosis.cx/cgi-bin/"
html_open =\
"""<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
<html>
<head>
<title>%s</title>
<style>
{font-family: helvetica, helv, arial, sans-serif; font-size: 10pt; text-align: left}
em {font-family: helvetica, helv, arial, sans-serif; font-size: 10pt}
ul, ol, li {font-family: helvetica, helv, arial, sans-serif; font-size: 10pt; text-align: left}
tt {font-family: mono, courier}
pre {font-family: mono; font-size: 9pt}
th {font-family: mono, courier; font-size: 8pt}
.code-sample {background-color: #CCCCCC}
.sidebar {background-color: #FFFFCC}
.title {font-size: medium; color: #3333CC}
.subtitle {font-size: 9pt; color: #3333CC}
.subhead {font-size: 12pt}
.input {background: #FFFFFF; color: #000000; font-weight: normal}
</style>
</head>
<body bgcolor="#ffffff" text="#000000">
"""
html_title = "Automatically Generated HTML"
html_close = "</body></html>"
code_block = \
"""<p><strong>%s</strong>
<table border="0" cellpadding="0" class="code-sample" width="100%%"><tr><td>
%s
</td></tr></table></p>
"""
promo = """
<table cellpadding=0 cellspacing=0 border=0 bgcolor="#000000" width="99%%" align=center>
<tr><td>
<table width="100%%" cellspacing=1 cellpadding=3 border=0>
<form method="get" action='"""+cgi_home+"""txt2html.cgi'>
<tr><td bgcolor="#cccccc">
<a href='"""+cgi_home+"""txt2html.cgi?source=../rc/nicelinks.txt'>
<img src='http://gnosis.cx/rc/eye.gif' border=0 alt="More"
align="right" align="top"></a>
URL to convert:
<input type="text" name="source" size=65 value="%(source)s"><br>
Conversion Type: <select name="type">
<option selected>INFER
<option>SIMPLE
<option>SMART_ASCII
<option>FAQ
<option>PYTHON
<option>RAW
<option>HTML </select>
Proxy-mode: <select name="proxy">
<option>NAVIGATOR
<option>TRAP_LINKS
<option selected>ALL
<option>NONE </select>
<input type="submit" name="go" value="Display!">
</td></tr>
</form>
<tr><td bgcolor="#cceecc">
Generated from <tt>%(source)s</tt> by
<a href="http://gnosis.cx/download/txt2html.zip"><tt>Txt2Html</tt></a>
</td></tr>
</table>
</td></tr>
</table>
"""
#-- End of boilerplate
def main(cfg_dict):
    """Convert one source document to HTML as per the configuration dict.

    Required keys in cfg_dict:
         'source':  URL of source (or local filename) or '<STDIN>'
         'target':  Filename of output or '<STDOUT>'
         'preface': (optional) HTTP header for output, esp. if CGI
         'type':    (of input) 'HTML','SMART_ASCII','PYTHON','SIMPLE',...
         'proxy':   'NAVIGATOR', 'TRAP_LINKS', 'ALL', 'NONE'

    HTML sources are passed through (with proxy/base-href rewriting);
    everything else is split into blocks and rendered by Process_Blocks().
    """
    replace = string.replace  # NOTE(review): appears unused in this function
    #-- Read the configuration, set options
    source = cfg_dict['source']
    if source == '<STDIN>':
        fhin = sys.stdin
        cfg_dict['source'] = '<STDIN>'
    else:
        try:
            fhin = urlopen(source)
        except:
            ErrReport(source+' could not be opened!', cfg_dict)
            return
    target = cfg_dict['target']
    if target == '<STDOUT>':
        fhout = sys.stdout
    else:
        fhout = open(target, 'w')
    #-- Get intype and the regex list
    intype = cfg_dict['type']
    re_list = Regex_by_filetype(intype)
    #-- Process as needed for input type
    if intype in ['HTML']:
        if cfg_dict.has_key('preface'): fhout.write(cfg_dict['preface'])
        doc = ''
        for line in fhin.readlines(): # normalize line endings to '\n'
            doc = doc+string.rstrip(line)+'\n'
        if cfg_dict['proxy'] in ['ALL', 'NAVIGATOR']: # inject proxy nav bar after <BODY>
            doc = re.sub('(?im)(<BODY(?:.*?)>)','\\1'+(promo % cfg_dict),doc)
        if cfg_dict['proxy'] <> 'NONE': # make relative links absolute
            doc = Absolutize(doc, source)
        if cfg_dict['proxy'] in ['ALL', 'TRAP_LINKS']: # reroute links through CGI
            doc = Proxify(doc, cgi_home+'txt2html.cgi?source=')
        fhout.write(doc)
    elif intype in ['SMART_ASCII']:
        #sys.stderr.write(' start: ' + time.ctime(time.time())+"\n")
        blocks = Make_Blocks(fhin, re_list)
        #sys.stderr.write(' blocks made: ' + time.ctime(time.time())+"\n")
        Process_Blocks(fhout, blocks, cfg_dict, title_block=1)
        #sys.stderr.write('blocks processed: ' + time.ctime(time.time())+"\n")
    elif intype in ['PYTHON', 'FAQ', 'SIMPLE', 'RAW']:
        blocks = Make_Blocks(fhin, re_list)
        Process_Blocks(fhout, blocks, cfg_dict)
    else:
        ErrReport('Something is amiss with filetype detection', cfg_dict)
        return
def Make_Blocks(fhin, re_list):
    """Split the input file into tagged blocks via a line-state machine.

    re_list is the (blank, header, text, quote, code) regex tuple from
    Regex_by_filetype().  Returns the module-global `blocks` list; each
    block starts with a '[HEAD]'/'[TEXT]'/'[QUOT]'/'[CODE]' marker.
    State lives in module globals shared with the start*() helpers.
    """
    #-- Initialize the globals
    global state, blocks, bl_num, newblock
    state = "HEADER"
    blocks = [""]
    bl_num = 0
    newblock = 1
    #-- Unpack the regex list
    blankln, headln, textln, quoteln, codeln = re_list
    #-- Break the file into relevant chunks
    for line in fhin.readlines():
        line = string.rstrip(line)+'\n' # normalize line endings to '\n'
        if state == "HEADER": # blank line means next line starts a new block
            if blankln.match(line): newblock = 1
            elif textln.match(line): startText(line)
            elif quoteln.match(line): startQuote(line)
            elif codeln.match(line): startCode(line)
            else:
                if newblock: startHead(line)
                else: blocks[bl_num] = blocks[bl_num] + line
            elif state == "TEXT": # blank line means next line starts a new block
            if blankln.match(line): newblock = 1
            elif headln.match(line): startHead(line)
            elif quoteln.match(line): startQuote(line)
            elif codeln.match(line): startCode(line)
            else:
                if newblock: startText(line)
                else: blocks[bl_num] = blocks[bl_num] + line
        elif state == "QUOTE": # blank line means next line starts a new block
            if blankln.match(line): newblock = 1
            elif headln.match(line): startHead(line)
            elif textln.match(line): startText(line)
            # do not transition quote->code without a blank line
            # elif codeln.match(line): startCode(line)
            else:
                if newblock: startQuote(line)
                else: blocks[bl_num] = blocks[bl_num] + line
        elif state == "CODE": # blank lines stay inside the code block
            if blankln.match(line): blocks[bl_num] = blocks[bl_num] + line
            elif headln.match(line): startHead(line)
            elif textln.match(line): startText(line)
            else: blocks[bl_num] = blocks[bl_num] + line
        else:
            raise ValueError, "unexpected input block state: "+state
    return blocks
def Process_Blocks(fhout, blocks, cfg_dict, title_block=0):
    """Render the tagged blocks from Make_Blocks() as a full HTML page.

    Blocks[1] becomes the title, blocks[2] the author line; the rest are
    dispatched to fixcode/fixquote/fixtext/fixhead by their marker.  The
    whole page (headers, boilerplate, body) is written to fhout at the
    end rather than block-by-block.
    """
    # Process all blocks, then write out headers and body
    # (performance might be snappier in CGI if we wrote block-by-block...)
    #-- check for server errors
    # NOTE(review): '|' binds loosely here, so this pattern is effectively
    # "(...404)|(403...)" rather than "...(404|403)..." — confirm intent.
    re_err = re.compile('^.*<TITLE>.*(404)|(403).*</TITLE>', re.I | re.S | re.M)
    block = string.join(blocks[0:3])
    if re_err.match(block):
        html_title = 'Server reported error on URL'
        re_body = re.compile('<BODY(?:.*?)>(.*)</BODY>', re.I | re.M | re.S)
        body = re_body.match(block).group(1)
    else:
        # Worry about having the right number of blocks
        if len(blocks) == 1:
            blocks = ['',cfg_dict['type'],cfg_dict['source'],
                      'Weird failure parsing source experienced!']
        elif len(blocks) <= 3 or not title_block:
            blocks = ['','Type of file: '+cfg_dict['type'],
                      cfg_dict['source']] + blocks[1:]
        # Give it a nice page title
        html_title = string.join(string.split(blocks[1], '\n'),' -- ')
        html_title = string.replace(html_title, '[HEAD]', '')
        # Massage each block as needed
        body = Titleify(blocks[1]) + Authorify(blocks[2])
        for block in blocks[3:]:
            if block[:6]=='[CODE]': block = fixcode(block[6:],cfg_dict['type'])
            elif block[:6]=='[QUOT]': block = fixquote(block[6:])
            elif block[:6]=='[TEXT]': block = fixtext(block[6:])
            elif block[:6]=='[HEAD]': block = fixhead(block[6:])
            else: raise ValueError, "unexpected block marker: "+block[:6]
            if cfg_dict['proxy'] in ['ALL', 'TRAP_LINKS']:
                block = Proxify(block, cgi_home+'txt2html.cgi?source=')
            body = body+block
    #-- Output with surrounding document HTML/HTTP
    if cfg_dict.has_key('preface'):
        fhout.write(cfg_dict['preface'])
    fhout.write(html_open % html_title)
    if cfg_dict['proxy'] in ['ALL', 'NAVIGATOR']:
        fhout.write(promo % cfg_dict)
    fhout.write(body)
    fhout.write(html_close)
def ErrReport(mess, cfg_dict):
    """Emit a minimal HTML error page (to stdout) wrapping *mess*."""
    if cfg_dict.has_key('preface'): print cfg_dict['preface']
    # html_title here is the module-level default, not a page-specific one.
    print (html_open % html_title)
    if cfg_dict['proxy'] in ['ALL', 'NAVIGATOR']:
        print (promo % cfg_dict)
    print '<h1>'+mess+'</h1>'
    print html_close
#-- Functions for start of block-type state
# Module-level parser state shared by Make_Blocks() and the helpers
# below.  Make_Blocks() re-initializes all four before each run; these
# defaults just make the helpers safe to call standalone.
state = "HEADER"
blocks = [""]
bl_num = 0
newblock = 1

def _start_block(marker, new_state, line):
    """Open a new output block tagged *marker*, switch the parser state
    to *new_state*, and clear the new-block flag.

    Shared implementation for the four start*() helpers, whose bodies
    were previously duplicated verbatim.
    """
    global state, blocks, bl_num, newblock
    state = new_state
    bl_num = bl_num + 1
    blocks.append(marker + line)
    newblock = 0

def startHead(line):
    """Begin a '[HEAD]' block (section heading / header state)."""
    _start_block('[HEAD]', 'HEADER', line)

def startText(line):
    """Begin a '[TEXT]' block (regular paragraph state)."""
    _start_block('[TEXT]', 'TEXT', line)

def startQuote(line):
    """Begin a '[QUOT]' block (block-quotation state)."""
    _start_block('[QUOT]', 'QUOTE', line)

def startCode(line):
    """Begin a '[CODE]' block (code-sample state)."""
    _start_block('[CODE]', 'CODE', line)
#-- Functions to massage blocks by type
def Titleify(block):
    """Format the document's first block as title/subtitle markup.

    Line 1 of the block becomes the (capitalized) title, line 2 the
    subtitle, and any further lines are appended with <br> separators.
    """
    title, subtitle, other = ('No Title', '', '')
    block = string.replace(block, '[HEAD]', '')
    #block = string.strip(block)
    block = Typographify(block)
    lines = string.split(block, '\n')
    if len(lines) > 0:
        title = lines[0]
        title = string.capwords(title)
    if len(lines) > 1:
        subtitle = lines[1]
    for i in range(2,len(lines)):
        other = other+'<br>'+lines[i]+'\n'
    block = """
<p><strong class="title">%s</strong><br>
<strong class="subtitle">%s</strong></p>
%s""" % (title, subtitle, other)
    return block
def Authorify(block):
    """Format the author/affiliation/date block: one <br>-terminated
    line per input line, wrapped in a single paragraph."""
    block = string.replace(block, '[HEAD]', '')
    #block = string.strip(block)
    lines = string.split(block, '\n')
    block = "<p>" +string.join(map(lambda ln: ln+'<br>\n', lines))+"</p>\n"
    block = Typographify(block)
    return block
def fixcode(block, doctype):
    """Render a '[CODE]' block: escape HTML, strip the common indent,
    extract an optional '#--- title ---#' line, and syntax-highlight
    Python samples via py2html when available."""
    # Some HTML preparation
    block = Detag(block)
    block = LeftMargin(block)
    # Pull out title if available
    re_title = re.compile('^#\-+ (.+) \-+#$', re.M)
    if_title = re_title.match(block)
    if if_title:
        title = if_title.group(1)
        block = re_title.sub('', block) # take title out of code
    else: title = ''
    #block = string.strip(block) # no surrounding whitespace
    # Process the code block with Py2HTML (if possible and appropriate)
    if py_formatter and (string.count(title,'.py') or
                         string.count(title,'Python') or
                         string.count(title,'python') or
                         doctype == 'PYTHON'):
        # NOTE(review): writes fixed files 'tmp'/'tmp.html' in the CWD —
        # not safe for concurrent CGI invocations.
        fh = open('tmp', 'w')
        fh.write(block)
        fh.close()
        py2html.main([None, '-format:rawhtml', 'tmp'])
        block = open('tmp.html').read()
        block = code_block % (title, block)
    # elif the-will-and-the-way-is-there-to-format-language-X:
    # elif the-will-and-the-way-is-there-to-format-language-Y:
    else:
        block = code_block % (title, '<pre>'+block+'</pre>')
    return block
def fixquote(block):
    """Render a '[QUOT]' block as an HTML blockquote with typographic
    markup and auto-linked URLs."""
    block = Detag(block)
    block = Typographify(block)
    block = '<blockquote>%s</blockquote>' % block
    block = URLify(block)
    return block
def fixtext(block):
    """Render a '[TEXT]' block as a paragraph: escape HTML, convert
    dash-rules to <hr>, apply typography and auto-link URLs."""
    block = Detag(block)
    block = Rulify(block)
    block = Typographify(block)
    block = '<p>%s</p>\n' % block
    block = URLify(block)
    return block
def fixhead(block):
    """Render a '[HEAD]' block as a section heading (drops any dash
    rule; AdjustCaps decides between subhead and <h3> styling)."""
    block = Detag(block)
    block = NoRule(block)
    #block = string.strip(block)
    block = AdjustCaps(block)
    block = Typographify(block+" ")
    return block
#-- Utility functions for text transformation
def AdjustCaps(txt):
    """Style a heading: ALL-CAPS headings (ignoring 'quoted'/[bracketed]
    literals) become capitalized subheads, anything else an <h3>."""
    # Bare header is block in ALLCAPS (excluding [module] names)
    non_lit = re.sub("['[].*[]']", '', txt)
    if non_lit == string.upper(non_lit):
        txt = capwords(txt)
        txt = '<p><strong class="subhead">%s</strong></p>\n' % txt
    else:
        txt = '<h3>%s</h3>' % txt
    txt = URLify(txt)
    return txt
def capwords(txt):
    """Capitalize each word of *txt*, leaving 'quoted' and [bracketed]
    tokens untouched.

    Unlike ``string.capwords``, tokens beginning with a single quote or
    an opening bracket (inline literals / module names) keep their case.
    Whitespace runs are collapsed to single spaces.

    Uses str methods instead of the Python-2-only ``string.split`` /
    ``string.join`` module functions, so it also runs under Python 3.
    """
    words = txt.split()
    for n, word in enumerate(words):
        # Skip literal/module markup such as 'main()' or [re].
        if word[0] not in "'[":
            words[n] = word.capitalize()
    return ' '.join(words)
def LeftMargin(txt):
    """Strip the block's leading indent (up to 12 spaces) from every line.

    The indent width is judged from the first line: the largest count of
    leading spaces (capped at 12) that is followed by a non-space there
    is removed from the start of each line in the block.
    """
    indent = 0
    for width in range(12, -1, -1):
        if re.match('(?sm)' + ' ' * width + r'\S', txt):
            indent = width
            break
    return re.sub('(?sm)^' + ' ' * indent, '', txt)
def Detag(txt):
    """Escape the HTML-special characters in *txt* so it displays
    literally ('&' is replaced first so entities are not double-escaped).

    Uses the ``str.replace`` method rather than the Python-2-only
    ``string.replace`` module function, so it also runs under Python 3.
    """
    txt = txt.replace('&', '&amp;')
    txt = txt.replace('<', '&lt;')
    txt = txt.replace('>', '&gt;')
    return txt
def URLify(txt):
txt0 = txt
# Convert special IMG URL's, e.g. {Alt Text: http://site.org/img.png}
# (don't actually try quite as hard to validate URL though)
txt = re.sub('(?sm){(.*?):\s*(http://.*)}', '<img src="\\2" alt="\\1">', txt)
# Convert regular URL's
txt = re.sub('(?:[^="])((?:http|ftp|gopher|file)://(?:[^ \n\r<\)]+))(\s)',
'<a href="\\1">\\1</a>\\2', txt)
return txt
def Proxify(txt, cgi_prog):
    """Prefix every <a href=...> target in *txt* with the proxy CGI URL
    *cgi_prog*, so followed links are themselves converted."""
    anchor = re.compile("""(?i)(<a href=["']?)""")
    return anchor.sub('\\1' + cgi_prog, txt)
def Absolutize(txt, base_href):
    """Convert all relative links in *txt* to absolute links.

    Inserts a <base href> after <head>/<HEAD> (mixed-case tags such as
    <Head> are not handled) and prefixes every scheme-less <a href>
    target with the directory part of *base_href*.  Does nothing when
    base_href does not look like a URL (e.g. '<STDIN>' or a filename).

    Fixes: (1) Python-2-only ``string.count``/``string.replace`` module
    calls replaced by str operations; (2) the scheme lookahead now also
    allows the optional quote — previously, backtracking over ["']? let
    quoted absolute URLs slip past (?!\\w+://) and get a bogus prefix.
    """
    if "://" in base_href:
        txt = txt.replace('<HEAD>', '<HEAD><base href="' + base_href + '">')
        txt = txt.replace('<head>', '<head><base href="' + base_href + '">')
        base_path = re.sub('(.*/).*', '\\1', base_href)  # extract path portion
        txt = re.sub(r"""(?im)(<a href=["']?)(?!["']?\w+://)(.*?>)""",
                     '\\1' + base_path + '\\2', txt)
    return txt
def Rulify(txt):
    """Replace each line consisting solely of dashes with an <hr> rule."""
    return re.sub('(?m)^-+$', '<hr>', txt)
def NoRule(txt):
    """Delete each line consisting solely of dashes (used under headings)."""
    return re.sub('(?m)^-+$', '', txt)
# Prefer the compiled mxTypographify extension when installed; otherwise
# fall back to the pure-regex implementation below.
try:
    from mxTypographify import Typographify
    sys.stderr.write("** Using mxTypographify **\n")
except ImportError:
    def Typographify(txt):
        """Apply the SMART_ASCII inline-markup rules to *txt*:
        [module] -> <em><code>, *bold* -> <strong>, -emph- -> <em>,
        _cite_ -> <cite>, 'func()' -> <code>.  Each pattern requires the
        markup to be delimited by whitespace/punctuation on both sides.
        """
        # [module] names
        r = re.compile(r"""([\(\s'/">]|^)\[(.*?)\]([<\s\.\),:;'"?!/-])""", re.M | re.S)
        txt = r.sub('\\1<em><code>\\2</code></em>\\3',txt)
        # *strongly emphasize* words
        r = re.compile(r"""([\(\s'/"]|^)\*(.*?)\*([\s\.\),:;'"?!/-])""", re.M | re.S)
        txt = r.sub('\\1<strong>\\2</strong>\\3', txt)
        # -emphasize- words
        r = re.compile(r"""([\(\s'/"]|^)-(.*?)-([\s\.\),:;'"?!/])""", re.M | re.S)
        txt = r.sub('\\1<em>\\2</em>\\3', txt)
        # _Book Title_ citations
        r = re.compile(r"""([\(\s'/"]|^)_(.*?)_([\s\.\),:;'"?!/-])""", re.M | re.S)
        txt = r.sub('\\1<cite>\\2</cite>\\3', txt)
        # 'Function()' names
        r = re.compile(r"""([\(\s/"]|^)'(.*?)'([\s\.\),:;"?!/-])""", re.M | re.S)
        txt = r.sub("\\1<code>\\2</code>\\3", txt)
        return txt
def infer_type(fname):
    """Guess the conversion type from the filename/URL extension.

    Returns one of 'HTML', 'PYTHON', 'FAQ', 'SMART_ASCII' or 'RAW'
    (the fallback for unrecognized extensions).

    Uses the ``str.lower`` method instead of the Python-2-only
    ``string.lower`` module function, so it also runs under Python 3.
    """
    if fname[-5:].lower() == '.html' or fname[-4:].lower() == '.htm':
        intype = 'HTML'
    elif fname[-1:] in ['.', '/']:
        # Directory-style URLs (trailing '/' or '.') are served as HTML.
        intype = 'HTML'
    elif fname[-3:].lower() == '.py' or fname[-4:].lower() == '.cgi':
        intype = 'PYTHON'
    elif fname[-4:].lower() == '.faq':
        intype = 'FAQ'
    elif fname[-4:].lower() == '.txt':
        intype = 'SMART_ASCII'
    else:
        intype = 'RAW'
    return intype
def Regex_by_filetype(intype):
    """Return the five line-classifier regexes for input type *intype*.

    The tuple order is (blank, header, text, quote, code).  Classifiers
    that should never match use the impossible pattern "$^"; unknown
    types fall back to blank/text-only classification.
    """
    never = r"$^"
    anything = r".*"
    blank = r"^$"
    if intype == 'SMART_ASCII':
        patterns = (blank,
                    r"\S",       # header: no indent
                    r" ? ?\S",   # text: up to two spaces of indent
                    r"    \S",   # quote: 4 spaces indent
                    r"^      ")  # code: 6+ spaces indent
    elif intype == 'FAQ':        # based on alt.compression FAQ
        patterns = (blank,
                    r"(^Subject: )|(^\d\.\d[\s\.])",
                    r"^(?!(^\d\.\d[\s\.]))\S",
                    r"^[ -] ",
                    never)
    elif intype in ['PYTHON', 'RAW']:
        # Everything is one big code block.
        patterns = (never, never, never, never, anything)
    elif intype == 'SIMPLE':
        # All non-blank content is text.
        patterns = (blank, never, anything, never, never)
    elif intype == 'HTML':
        # Everything is code in HTML source.
        patterns = (never, never, never, never, anything)
    else:
        patterns = (blank, never, anything, never, never)
    return tuple(re.compile(p) for p in patterns)
#-- Sort out meaning of passed arguments/variables
def ParseArgs(list):
    """Build the configuration dict from shell arguments.

    Recognizes -h/-? (prints help, returns None), -type:<T>,
    -proxy:<M>, -REBUILD_DOCS (regenerates txt2html.txt and uses it as
    the source); any non-switch argument becomes the source.  Also
    rewrites the module-level `promo` bar for shell (non-CGI) use.
    NOTE(review): the parameter name shadows the builtin `list`.
    """
    upper = string.upper
    cfg_dict = {'source': '<STDIN>',
                'target': '<STDOUT>',
                'type': 'INFER',
                'proxy': 'NONE' }
    for item in list:
        if item in ['-h','/h','-?','/?', '?']: # help screen
            print __doc__; return None
        if item[0] in '/-': # a switch!
            if upper(item[1:5]) == 'TYPE': # set type
                cfg_dict['type'] = upper(item[6:])
            if upper(item[1:6]) == 'PROXY': # set proxy mode
                cfg_dict['proxy'] = upper(item[7:])
            if upper(item[1:]) == 'REBUILD_DOCS':
                # Regenerate the documentation file from module metadata.
                fhtxt = open('txt2html.txt', 'w')
                auth = string.join(__author__, '\n')
                docs = ('DMTXT2HTML.PY\n'+__oneliner__+'\n'+
                        '\n'+auth+'\n'+__version__+'\n'+
                        __copyright__+'\n'+
                        __longdoc__+__comments__)
                fhtxt.write(docs)
                fhtxt.close()
                cfg_dict['source'] = 'txt2html.txt'
        else: # not switch, set source
            cfg_dict['source'] = item
    if cfg_dict['type'] == 'INFER':
        cfg_dict['type'] = infer_type(cfg_dict['source'])
    global promo # strip the CGI base from the proxy navigation bar
    promo = string.replace(promo, cgi_home, '')
    return cfg_dict
def ParseCGI():
    """Build the configuration dict from CGI form fields.

    Reads 'source', 'type', 'preface' and 'proxy' from the query/form;
    each has a sensible default.  Redirects stderr to stdout so CGI
    errors show up in the response.
    """
    import cgi
    upper = string.upper
    cfg_dict = {'target': '<STDOUT>'}
    sys.stderr = sys.stdout
    form = cgi.FieldStorage()
    if form.has_key('source'):
        cfg_dict['source'] = form['source'].value
    else:
        cfg_dict['source'] = '../rc/txt2html.txt'
    if form.has_key('type') and upper(form['type'].value)<>'INFER':
        cfg_dict['type'] = upper(form['type'].value)
    else:
        cfg_dict['type'] = infer_type(cfg_dict['source'])
    if form.has_key('preface'): # use with caution!
        cfg_dict['preface'] = form['preface'].value
    else:
        cfg_dict['preface'] = 'Content-type: text/html\n\n'
    if form.has_key('proxy'):
        cfg_dict['proxy'] = form['proxy'].value
    else:
        cfg_dict['proxy'] = 'ALL'
    return cfg_dict
#-- The module level code
if __name__ == '__main__':
    # CGI invocations carry no shell arguments; anything on argv means
    # we were run from a shell.
    if len(sys.argv) >= 2:
        cfg_dict = ParseArgs(sys.argv[1:])
    else:
        cfg_dict = ParseCGI()
    # ParseArgs returns None after printing help; skip conversion then.
    if cfg_dict: main(cfg_dict)
| 37.84539 | 97 | 0.575653 |
acde90fb4d97b02791d5483a94a97e5a6d939938 | 147 | py | Python | CursoEmVideo/WhileAnnoying.py | yazdejesus/FirstLessons-Python | e2c2d75c3aba459669c972a3c959b18ecc82e030 | [
"MIT"
] | null | null | null | CursoEmVideo/WhileAnnoying.py | yazdejesus/FirstLessons-Python | e2c2d75c3aba459669c972a3c959b18ecc82e030 | [
"MIT"
] | 1 | 2021-01-28T17:06:06.000Z | 2021-01-30T15:13:03.000Z | CursoEmVideo/WhileAnnoying.py | yazdejesus/FirstLessons-Python | e2c2d75c3aba459669c972a3c959b18ecc82e030 | [
"MIT"
] | null | null | null | sexo = input("Introduza o sexo: ")
while sexo not in "MmFf":
sexo = input("A sério? Digite apenas M ou F")
print(f"Parabéns, o teu sexo é {sexo}") | 36.75 | 46 | 0.673469 |
acde916da9bcdc3a825e9fb453f56505e1a4fcc6 | 2,543 | py | Python | python/tests/ks/loop.py | wadaniel/marlpde | 9f702fb787de33cebe61c503655b18b542bdc7bb | [
"MIT"
] | null | null | null | python/tests/ks/loop.py | wadaniel/marlpde | 9f702fb787de33cebe61c503655b18b542bdc7bb | [
"MIT"
] | null | null | null | python/tests/ks/loop.py | wadaniel/marlpde | 9f702fb787de33cebe61c503655b18b542bdc7bb | [
"MIT"
] | null | null | null | #!/bin/python3
import sys
sys.path.append('./../../_model/')
from KS import *
import matplotlib.pyplot as plt
# LES defaults
numActions = 32
gridSize = 32
# DNS defaults
N = 2048
L = 22
nu = 1.0
dt = 0.25
seed = 42
tTransient = 50
tEnd = 550
tSim = tEnd - tTransient
nSimSteps = tSim/dt
episodeLength = 500
rewardFactor = 1
# DNS baseline
def setup_dns_default(N, dt, nu , seed):
    """Run the baseline DNS: simulate the transient period, restart from
    its final state, then simulate the main interval and compute the
    energy spectrum.  Returns the finished KS solver instance.
    Reads module-level globals L, tTransient and tSim.
    """
    print("[ks_environment] setting up default dns")
    # simulate transient period
    dns = KS(L=L, N=N, dt=dt, nu=nu, tend=tTransient, seed=seed)
    dns.simulate()
    dns.fou2real()
    # restart from the transient's final field (real and Fourier space)
    u_restart = dns.uu[-1,:].copy()
    v_restart = dns.vv[-1,:].copy()
    # simulate rest
    dns.IC( u0 = u_restart)
    dns.simulate( nsteps=int(tSim/dt), restart=True )
    dns.fou2real()
    dns.compute_Ek()
    return dns
# DNS baseline
# NOTE(review): this first dns (and its simulate/fou2real) is discarded —
# it is immediately overwritten by setup_dns_default() below.
dns = KS(L=L, N=N, dt=dt, nu=nu, tend=tTransient)
dns.simulate()
dns.fou2real()
dns = setup_dns_default(N, dt, nu, seed)
## restart: take the DNS end state as the LES initial condition
v_restart = dns.vv[-1,:].copy()
u_restart = dns.uu[-1,:].copy()
# Truncate the DNS Fourier modes to the coarse grid (keep lowest modes,
# rescale by the grid-size ratio).
v0 = np.concatenate((v_restart[:((gridSize+1)//2)], v_restart[-(gridSize-1)//2:])) * gridSize / dns.N
## create interpolated IC
# NOTE(review): f_restart appears unused below — confirm before removing.
f_restart = interpolate.interp1d(dns.x, u_restart, kind='cubic')
# init rewards
rewards = []
# Initialize LES (coarse-grid solver with the spectral IC from above)
sgs = KS(L=L, N = gridSize, dt=dt, nu=nu, tend=tSim)
sgs.IC( v0 = v0 )
sgs.setup_basis(numActions)
## run controlled simulation
error = 0
step = 0
# number of solver sub-steps per episode step
nIntermediate = int(tSim / dt / episodeLength)
prevkMseLogErr = 0.
kMseLogErr = 0.
reward = 0.
cumreward = 0.
while step < episodeLength and error == 0:
    # apply action and advance environment (zero actions = uncontrolled)
    actions = np.zeros(numActions)
    try:
        for _ in range(nIntermediate):
            sgs.step(actions)
        sgs.compute_Ek()
        sgs.fou2real()
    except Exception as e:
        print("[ks_environment] Exception occured:")
        print(str(e))
        error = 1
        break
    # get new state
    state = sgs.getState().flatten().tolist()
    if(np.isnan(state).any() == True):
        print("[ks_environment] Nan state detected")
        error = 1
        break
    # relative squared error of the LES energy spectrum vs the DNS one
    kMseLogErr = np.mean((np.abs(dns.Ek_ktt[sgs.ioutnum,1:gridSize//2] - sgs.Ek_ktt[sgs.ioutnum,1:gridSize//2])/dns.Ek_ktt[sgs.ioutnum,1:gridSize//2])**2)
    # reward is the improvement of that error over the previous step
    reward = rewardFactor*(prevkMseLogErr-kMseLogErr)
    prevkMseLogErr = kMseLogErr
    cumreward += reward
    if (np.isnan(reward)):
        print("[ks_environment] Nan reward detected")
        error = 1
        break
    step += 1
print(cumreward)
acde91cb0a56f88d7ea9f94cfca84a76ddb72cb0 | 3,316 | py | Python | tests/converters/test_convert_cals2formex.py | laurent-laporte-pro/benker | d4ccbf62732ac9e36788ba4137e45a6eaae84657 | [
"MIT"
] | 1 | 2020-05-13T10:31:13.000Z | 2020-05-13T10:31:13.000Z | tests/converters/test_convert_cals2formex.py | laurent-laporte-pro/benker | d4ccbf62732ac9e36788ba4137e45a6eaae84657 | [
"MIT"
] | 13 | 2019-05-21T18:53:00.000Z | 2021-11-11T07:43:32.000Z | tests/converters/test_convert_cals2formex.py | laurent-laporte-pro/benker | d4ccbf62732ac9e36788ba4137e45a6eaae84657 | [
"MIT"
] | 1 | 2020-05-19T15:23:57.000Z | 2020-05-19T15:23:57.000Z | # coding: utf-8
from __future__ import print_function
import shutil
import py.path # type hints
import pytest
import xmldiff.main
from lxml import etree
from benker.converters.cals2formex import convert_cals2formex
from tests.resources import RESOURCES_DIR
@pytest.mark.parametrize(
    "input_name, expected_name, cals_prefix, cals_ns",
    # fmt: off
    [
        # (source CALS table, expected Formex output, CALS prefix, CALS namespace)
        (
            "cals/tbl_small_table.xml",
            "cals2formex/tbl_small_table.xml",
            "cals",
            "https://lib.benker.com/schemas/cals.xsd",
        ),
        (
            "cals/tbl_sample.xml",
            "cals2formex/tbl_sample.xml",
            "cals",
            "https://lib.benker.com/schemas/cals.xsd",
        ),
        (
            "cals/tbl_sample_formex.xml",
            "cals2formex/tbl_sample_formex.xml",
            "cals",
            "https://lib.benker.com/schemas/cals.xsd",
        ),
        (
            "cals/tbl_sample_cals2.xml",
            "cals2formex/tbl_sample_cals2.xml",
            None,
            None,
        ),
        (
            "cals/fix_9_lost_text_in_entries.xml",
            "cals2formex/fix_9_lost_text_in_entries.xml",
            None,
            None,
        ),
        (
            "cals/fix_11_corpus_width_missing.xml",
            "cals2formex/fix_11_corpus_width_missing.xml",
            None,
            None,
        ),
        (
            "cals/fix_12_missing_colspec_attributes.xml",
            "cals2formex/fix_12_missing_colspec_attributes.xml",
            None,
            None,
        ),
        (
            "cals/fix_10_embed_gr_notes1.xml",
            "cals2formex/fix_10_embed_gr_notes1.xml",
            None,
            None,
        ),
        (
            "cals/fix_10_embed_gr_notes2.xml",
            "cals2formex/fix_10_embed_gr_notes2.xml",
            None,
            None,
        ),
        (
            "cals/tbl_col_type_header.xml",
            "cals2formex/tbl_col_type_header.xml",
            None,
            None,
        ),
    ],
    # fmt: on
)
def test_convert_cals2formex(input_name, expected_name, cals_prefix, cals_ns, tmpdir):
    # type: (str, str, str, str, py.path.local) -> None
    # Convert the CALS resource into a temporary file, then compare every
    # //TBL element against the expected Formex resource with xmldiff.
    src_xml = RESOURCES_DIR.join(input_name) # type: py.path.local
    dst_xml = tmpdir.join(src_xml.basename)
    convert_cals2formex(
        str(src_xml), str(dst_xml), width_unit="mm", use_cals=True, cals_prefix=cals_prefix, cals_ns=cals_ns
    )
    # - Compare with expected
    xml_parser = etree.XMLParser(remove_blank_text=True)
    expected_xml = RESOURCES_DIR.join(expected_name) # type: py.path.local
    if expected_xml.exists():
        expected_tree = etree.parse(str(expected_xml), parser=xml_parser)
        expected_elements = expected_tree.xpath("//TBL")
        dst_tree = etree.parse(str(dst_xml), parser=xml_parser)
        dst_elements = dst_tree.xpath("//TBL")
        assert len(expected_elements) == len(dst_elements)
        for dst_elem, expected_elem in zip(dst_elements, expected_elements):
            diff_list = xmldiff.main.diff_trees(dst_elem, expected_elem)
            assert not diff_list
    else:
        # missing resource: create it (self-bootstrapping golden files)
        shutil.copy(str(dst_xml), str(expected_xml))
        print("You should check and commit the file: '{}'".format(expected_xml))
| 30.703704 | 108 | 0.587756 |
acde926d14e3a7a2aab9dfa37009ecbd913bce1d | 339 | py | Python | fastapi/src/db/base_class.py | smilix/cooking | 6ecb4d547e0c7e9996607290f51de8356af6c420 | [
"MIT"
] | null | null | null | fastapi/src/db/base_class.py | smilix/cooking | 6ecb4d547e0c7e9996607290f51de8356af6c420 | [
"MIT"
] | null | null | null | fastapi/src/db/base_class.py | smilix/cooking | 6ecb4d547e0c7e9996607290f51de8356af6c420 | [
"MIT"
] | null | null | null | from typing import Any
from sqlalchemy.ext.declarative import as_declarative, declared_attr
@as_declarative()
class Base:
    """Declarative base for all SQLAlchemy models in this app."""
    id: Any
    __name__: str
    # Generate __tablename__ automatically from the lowercased class name
    @declared_attr
    def __tablename__(cls) -> str:
        return cls.__name__.lower()
class WithOwnerMixin(object):
    """Mixin marking a model as owned by a user (via ``owner_id``)."""
    # foreign-key column is expected to be declared by the concrete model
    owner_id: Any
| 17.842105 | 68 | 0.725664 |
acde92ccaa831a63fb78950c115a9263ae9d57ce | 3,805 | py | Python | src/python/PSDG_Domain.py | ml4ai/tomcat-planrec | 70af6464851c641eb414fd1c818cfb5b1351e079 | [
"MIT"
] | null | null | null | src/python/PSDG_Domain.py | ml4ai/tomcat-planrec | 70af6464851c641eb414fd1c818cfb5b1351e079 | [
"MIT"
] | 4 | 2021-06-17T15:21:25.000Z | 2021-07-18T20:20:07.000Z | src/python/PSDG_Domain.py | ml4ai/tomcat-planrec | 70af6464851c641eb414fd1c818cfb5b1351e079 | [
"MIT"
] | null | null | null | from nltk import Nonterminal
from PSDG import PSDG
from PSDProduction import PSDProduction
import sys
import random
import copy
import itertools
from typing import List, Set, Dict, Tuple, Optional, Callable
class PSDG_Domain:
    """HTN-style planning domain backed by a probabilistic grammar (PSDG).

    Holds decomposition methods and primitive actions; after
    :meth:`initialize_planning` it can sample plans (and the induced
    state sequences) from the grammar.
    """
    def __init__(
        self, methods: List["PSDG_Method"], actions: List["PSDG_Action"]
    ) -> None:
        self.methods = methods
        self.actions = actions
    def initialize_planning(self, i_state: Dict) -> None:
        """Build the PSDG from the methods, rooted at nonterminal "P",
        and reset the current/initial planning state."""
        self.i_state = i_state
        self.c_state = copy.deepcopy(i_state)
        self.states = []
        prods = []
        for i in self.methods:
            subtasks = []
            for j in i.subtasks:
                # names starting with "!" denote primitive actions (terminals)
                if j[0] == "!":
                    subtasks.append(j)
                else:
                    subtasks.append(Nonterminal(j))
            prods.append(
                PSDProduction(
                    Nonterminal(i.task), subtasks, i_state, i.preconditions,
                )
            )
        self.psdg = PSDG(Nonterminal("P"), prods)
    def _sample_plan(
        self, symbol: Optional[Nonterminal] = None, depth: Optional[int] = None
    ) -> List[str]:
        """Recursively sample one plan (list of action names) from *symbol*,
        applying action effects to ``self.c_state`` as terminals are emitted.
        Recursion is cut off at *depth* (unbounded by default)."""
        if depth is None:
            depth = sys.maxsize
        if depth > 0:
            if symbol is None:
                symbol = self.psdg.start()
            plan = []
            # pick a production for `symbol` weighted by its current probability
            prod = random.choices(
                [x for x in self.psdg.productions(lhs=symbol)],
                [x.prob() for x in self.psdg.productions(lhs=symbol)],
            )[0]
            for i in prod.rhs():
                if isinstance(i, Nonterminal):
                    plan = plan + self._sample_plan(i, depth - 1)
                else:
                    # terminal: apply the matching action's effects and
                    # re-weight the grammar for the new state
                    for j in self.actions:
                        if j.__name__ == i:
                            self.c_state = j.applyEffects(self.c_state)
                            self.psdg.update_prods(self.c_state)
                            self.states.append(copy.deepcopy(self.c_state))
                    plan.append(i)
            return plan
        return []
    def sample_plans(
        self,
        n_samples: int = 1,
        output_options: Optional[str] = None,
        start: Optional[str] = None,
        depth: Optional[int] = None,
    ):
        """Sample *n_samples* plans. *output_options*: "a" -> plans only,
        "s" -> state sequences only, "as" -> zipped pairs; default is
        interleaved state/action traces."""
        state_seqs = []
        plans = []
        if start is not None:
            start = Nonterminal(start)
        for i in range(n_samples):
            self.states = [self.i_state]
            plans.append(self._sample_plan(start, depth))
            state_seqs.append(self.states)
            # reset state and grammar weights between samples
            self.c_state = copy.deepcopy(self.i_state)
            self.psdg.update_prods(self.c_state)
        if output_options == "a":
            return plans
        if output_options == "s":
            return state_seqs
        if output_options == "as":
            return tuple(zip(plans, state_seqs))
        plan_traces = []
        for i, j in enumerate(state_seqs):
            # interleave: state0, action0, state1, action1, ...
            trace = [None] * (len(j) + len(plans[i]))
            trace[::2] = j
            trace[1::2] = plans[i]
            plan_traces.append(trace)
        return plan_traces
class PSDG_Action:
    """Primitive planning action.

    Wraps an effects callable (state -> successor state) and a transition
    probability callable ((state, state) -> float) under a name that the
    grammar's terminals refer to.
    """

    def __init__(self, name: str, effects: Callable[[Dict], Dict],
                 trans_prob: Callable[[Dict, Dict], float]):
        self.__name__ = name
        self._effects = effects
        self._trans_prob = trans_prob

    def applyEffects(self, state: Dict):
        """Return the successor state produced by applying this action."""
        return self._effects(state)

    def state_trans_prob(self, state_0: Dict, state_1: Dict):
        """Return the probability of transitioning from *state_0* to *state_1*."""
        return self._trans_prob(state_0, state_1)
class PSDG_Method:
    """Task-decomposition method.

    Maps *task* onto an ordered list of *subtasks* (action names are
    prefixed with "!"), guarded by a *preconditions* callable that scores a
    state.
    """

    def __init__(self, task: str, preconditions: Callable[[Dict], float],
                 subtasks: List[str]):
        self.task = task
        self.preconditions = preconditions
        self.subtasks = subtasks
| 29.96063 | 79 | 0.532983 |
acde92fa12deb8549f108abc7f5eb396679b1850 | 18,163 | py | Python | plant.py | zhtjtcz/PlantsVSZombies | 589d47536461fca95ea3e57a3f531b7bd8208e06 | [
"Apache-2.0"
] | 10 | 2020-09-04T03:26:41.000Z | 2021-11-18T05:29:42.000Z | plant.py | zhtjtcz/PlantsVSZombies | 589d47536461fca95ea3e57a3f531b7bd8208e06 | [
"Apache-2.0"
] | null | null | null | plant.py | zhtjtcz/PlantsVSZombies | 589d47536461fca95ea3e57a3f531b7bd8208e06 | [
"Apache-2.0"
] | 1 | 2020-10-12T08:08:35.000Z | 2020-10-12T08:08:35.000Z | from setting import *
import pygame
import random
import sys
import os
import time
import threading
import psutil
import subprocess
from multiprocessing import Process
from PIL import Image, ImageTk
class Sun():
    """Falling-sun resource: spawning, drawing/animating, and pick-up.

    Each entry in ``sunlist`` is ``[x, y, frame, frame_tick, spawn_time, falls]``;
    ``pick_list`` holds suns animating toward the counter after being clicked.
    ``self.sum`` is the player's current sun balance (starts at 500).
    """
    def __init__(self,scr):
        self.time=pygame.time.get_ticks()
        self.path='Picture\Others\Sun\\'
        self.sunlist=[]
        self.screen=scr
        self.sum=500
        self.pick_list=[]
        random.seed(a=None)
    def Appear(self):
        # spawn one sky sun every 6 seconds at a random x position
        if (pygame.time.get_ticks()-self.time<=6000):
            return
        self.sunlist.append([random.randint(50,600),80,1,1,pygame.time.get_ticks(),1])
        self.time=pygame.time.get_ticks()
    # sun spawning
    def Draw(self):
        # survivors are collected into a/b and written back at the end,
        # which drops expired (15 s) and finished pick animations (0.5 s)
        a=[]
        b=[]
        for sun in self.sunlist:
            if (pygame.time.get_ticks() - sun[4] >= 15000):
                continue
            a.append(sun)
            pic=pygame.image.load(self.path+str(sun[2])+'.png')
            self.screen.blit(pic,(sun[0],sun[1]))
            if (sun[1]<=450):
                if (sun[5]>=1):
                    sun[1]+=1.6
            sun[3]+=1
            if (sun[3]>=Plant_Move_FPS):
                sun[3]=1
                sun[2]+=1
                if (sun[2]>22):
                    sun[2]=1
        for sun in self.pick_list:
            if (pygame.time.get_ticks()-sun[5]>=500):
                continue
            b.append(sun)
            # interpolate toward the sun counter over 500 ms
            deltatime=(pygame.time.get_ticks()-sun[5])/500
            pic=pygame.image.load(self.path+str(sun[2])+'.png')
            self.screen.blit(pic,(sun[0]-deltatime*sun[3],sun[1]-deltatime*sun[4]))
        self.sunlist=[]
        self.pick_list=[]
        for sun in a:
            self.sunlist.append(sun)
        for sun in b:
            self.pick_list.append(sun)
    def Pick(self,click):
        # 70x70 px hit box; expire the sun and start its fly-to-counter animation
        pos=( int(round(click[0])),int(round(click[1])) )
        for sun in self.sunlist:
            if (0<= pos[0]-sun[0] <=70 and 0<= pos[1]-sun[1]<=70):
                self.sum+=25
                sun[4]=-100000
                self.pick_list.append((sun[0],sun[1],sun[2],(sun[0]-50),(sun[1]-50),pygame.time.get_ticks()))
                break
    # sun pick-up
# Sun (resource) class
class Card():
    """Seed-card slot bar: draws cards with cooldown/cost shading and
    tracks which plant card is currently selected (``self.select``)."""
    def __init__(self,scr,card_list):
        self.screen=scr
        self.path='Picture\Cards\\'
        self.card_list=[]
        self.cost_list=[]
        self.cd_list=[]
        self.time_list=[]
        self.pic_list=[]
        self.select=-1
        for i in card_list:
            self.card_list.append(i)
            self.cost_list.append(Plant_cost[i])
            self.cd_list.append(Plant_cd[i])
            # -INF means "never placed", so the card starts off cooldown
            self.time_list.append(-INF)
    def Draw(self,sunsum):
        for i in range(0,len(self.card_list)):
            img=pygame.image.load(self.path+str(self.card_list[i]+1)+'.png')
            gray=pygame.Surface((Card_scale[0],Card_scale[1]+5),flags=0,depth=32)
            gray.set_alpha(100)
            self.screen.blit(img,(Card_pos[0]+i*Card_size,Card_pos[1]))
            if (pygame.time.get_ticks()-self.time_list[i]<self.cd_list[i]):
                # still cooling down: overlay a shrinking translucent mask
                x=(pygame.time.get_ticks()-self.time_list[i])/self.cd_list[i]
                x=(1-x)
                self.screen.blit(gray,(Card_pos[0]+i*Card_size,Card_pos[1]))
                gray=pygame.Surface((Card_scale[0],int((Card_scale[1]+5)*x)),flags=0,depth=32)
                gray.set_alpha(100)
                self.screen.blit(gray,(Card_pos[0]+i*Card_size,Card_pos[1]))
            else:
                # off cooldown but unaffordable: gray the whole card
                if (sunsum<self.cost_list[i]):
                    self.screen.blit(gray,(Card_pos[0]+i*Card_size,Card_pos[1]))
    def Choose(self,click,sunsum):
        # select the clicked card if it is off cooldown and affordable
        if (sunsum<=0):
            return
        pos=( int(round(click[0])),int(round(click[1])) )
        for i in range(len(self.card_list)):
            if (pygame.time.get_ticks()-self.time_list[i]<self.cd_list[i]):
                continue
            if (sunsum<self.cost_list[i]):
                continue
            if (0<= pos[0]-(Card_pos[0]+i*Card_size)<=Card_Width and 0<=pos[1]-Card_pos[1]<=Card_Height):
                self.select=self.card_list[i]
    def Put(self,id):
        # plant `id` was placed: start its cooldown timer
        for i in range(0,len(self.card_list)):
            if (self.card_list[i]==id):
                self.time_list[i]=pygame.time.get_ticks()
                return
    # seed-card slot class
class Car():
    """Lawn-mower carts, one per lane.

    ``sit[i]`` per lane: 0 = parked, 1 = triggered and rolling, 2 = gone.
    """
    def __init__(self,scr):
        self.screen=scr
        self.pos=[]
        self.img=pygame.image.load('Picture\Others\Car.png')
        for i in range(5):
            self.pos.append(Coordinate_origin[0]-65)
        self.sit=[0 for i in range(5)]
    def Draw(self):
        for i in range(5):
            if (self.sit[i]==2):
                continue
            self.screen.blit(self.img,(self.pos[i],Coordinate_origin[1]+i*Block_size_height))
    def Event(self,zombies):
        for i in range(5):
            if (self.sit[i]==2):
                continue
            if (self.sit[i]==1):
                # rolling: kill every zombie in the lane it passes over
                for zom in zombies:
                    if (zom.line!=i):
                        continue
                    if (zom.pos<=self.pos[i]+2):
                        zom.hp=0
                self.pos[i]+=6.0
                if (self.pos[i]>=800):
                    self.sit[i]=2
                continue
            # parked: trigger once a zombie in this lane crosses the lawn edge
            for zom in zombies:
                if (i!=zom.line):
                    continue
                if (zom.pos<=-70):
                    self.sit[i]=1
        return
    # lawn-mower cart class
class Bullet():
    """Pea projectile. ``id``: 1 = normal pea, 2 = snow pea, 3 = burst
    (hit) sprite. A bullet becomes id 3 on hit and despawns shortly after."""
    def __init__(self,scr,line,pos,id):
        self.screen=scr
        self.line=line
        self.pos=pos
        self.id=id
        self.speed=8
        self.path='Picture\Bullets\\'
        self.exist=True
        self.sit=0
    def Draw(self):
        # despawn off the right edge of the screen
        if (self.pos>=800):
            self.exist=False
            return
        img=pygame.image.load(self.path+str(self.id)+'.png')
        if (self.id==3):
            # burst sprite is drawn offset at the impact point
            self.screen.blit(img,(self.pos+100,Coordinate_origin[1]+Block_size_height*(self.line)))
        else:
            self.screen.blit(img,(self.pos,Coordinate_origin[1]+Block_size_height*(self.line)))
        if (self.id!=3):
            self.pos+=self.speed
        if (self.id==3):
            # burst lives for 2 draw calls, then the bullet is removed
            self.sit+=1
            if (self.sit==2):
                self.exist=False
    def Cha(self,a,b):
        # 2D cross product of vectors a and b
        return a[0]*b[1]-a[1]*b[0]
    def Vector(self,a,b):
        # vector from b to a
        return [a[0]-b[0],a[1]-b[1]]
    def Cross(self,zombie):
        # NOTE(review): unfinished circle-vs-rectangle intersection test.
        # The corners A..D and radius r are computed but the condition below
        # is the empty tuple `()`, which is always falsy, so this method
        # always returns False. Hit detection is actually done in Attack().
        pos=[self.pos,Coordinate_origin[1]+Block_size_height*(self.line)]
        r=12
        D=[zombie.pos,Coordinate_origin[1]+Block_size_height*(self.line-0.5)]
        C=[zombie.pos+Block_size_width,Coordinate_origin[1]+Block_size_height*(self.line-0.5)]
        B=[zombie.pos+Block_size_width,Coordinate_origin[1]+Block_size_height*(self.line+0.5)]
        A=[zombie.pos,Coordinate_origin[1]+Block_size_height*(self.line+0.5)]
        if ():
            return True
        else:
            return False
    def Attack(self,zombie):
        # distance-based hit test: damage the front-most overlapping zombie
        # in this lane, then turn into the burst sprite (id 3)
        if (self.id==3):
            return
        where=INF
        for zom in zombie:
            if (zom.hp<=0):
                continue
            if (self.line!=zom.line):
                continue
            if (abs(self.pos-zom.pos)<=10):
                where=min(where,zom.pos)
        if (where!=INF):
            for zom in zombie:
                if (zom.hp<=0):
                    continue
                if (self.line!=zom.line):
                    continue
                if (zom.pos==where):
                    zom.hp-=1
                    # snow peas additionally slow the zombie
                    if (self.id==2):
                        zom.slow=True
                        zom.slow_time=pygame.time.get_ticks()
            self.id=3
            #self.pos+=60
            return
    # Bullet class
# Three pea sprites: normal pea, snow pea, and the burst (hit) pea
class Sunflower():
    """Sunflower plant: periodically produces a sun (every ``cd`` ms)."""
    def __init__(self,scr,id,pos):
        self.sit=1
        self.fps=1
        self.pos=pos
        self.cd=24000
        self.screen=scr
        self.hp=Plant_hp[id]
        self.dic=Plant_dic[id]
        self.pic_sum=Plant_Picsum[id]
        # randomize the first production time so sunflowers desynchronize
        self.time=pygame.time.get_ticks()-random.randint(0,self.cd)
        self.pic_pos=(Coordinate_origin[0]+Block_size_width*pos[1],Coordinate_origin[1]+Block_size_height*pos[0])
    # parameter pre-processing
    def Draw(self):
        img=pygame.image.load(self.dic+str(self.sit)+'.png')
        self.screen.blit(img,self.pic_pos)
        self.fps+=1
        if (self.fps>=Plant_Move_FPS):
            self.fps=1
            self.sit+=1
            if (self.sit>self.pic_sum):
                self.sit=1
    # draw the sprite; each animation frame is held for a fixed number of
    # ticks to produce the animated effect
    def Danger(self,zomlist):
        # a zombie is chewing on this plant: nudge the production timer
        # back a little per attacker (called once per frame while attacked)
        if (self.hp<=0):
            return
        cnt=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                cnt+=1
        while (cnt>=0):
            self.time-=0.02
            cnt-=1
    def Event(self,a,bulllist,zomlist):
        if (self.hp<=0):
            return
        flag=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                flag=1
        if (flag>=1):
            self.Danger(zomlist)
        # drop a non-falling sun next to the plant when the cooldown elapses
        if (pygame.time.get_ticks()-self.time >= self.cd):
            a.sunlist.append([self.pic_pos[0]+10,self.pic_pos[1]+10,1,1,pygame.time.get_ticks(),0])
            self.time=pygame.time.get_ticks()
    # sun-production check
# Sunflower class
class PeaShooter():
    """Pea shooter: fires a normal pea (Bullet id 1) every ``cd`` ms when a
    zombie is present in its lane."""
    def __init__(self,scr,id,pos):
        self.sit=1
        self.fps=1
        self.pos=pos
        self.cd=1400
        self.screen=scr
        self.hp=Plant_hp[id]
        self.dic=Plant_dic[id]
        self.pic_sum=Plant_Picsum[id]
        self.time=pygame.time.get_ticks()-1000
        self.pic_pos=(Coordinate_origin[0]+Block_size_width*pos[1],Coordinate_origin[1]+Block_size_height*pos[0])
    def Draw(self):
        # animated sprite: advance one frame every Plant_Move_FPS ticks
        img=pygame.image.load(self.dic+str(self.sit)+'.png')
        self.screen.blit(img,self.pic_pos)
        self.fps+=1
        if (self.fps>=Plant_Move_FPS):
            self.fps=1
            self.sit+=1
            if (self.sit>self.pic_sum):
                self.sit=1
    def Danger(self,zomlist):
        # being chewed: nudge the firing timer back per nearby attacker
        if (self.hp<=0):
            return
        cnt=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                cnt+=1
        while (cnt>=0):
            self.time-=0.02
            cnt-=1
    def Event(self,a,bulllist,zomlist):
        if (self.hp<=0):
            return
        if (pygame.time.get_ticks()-self.time <= self.cd):
            return
        flag=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                flag=1
        if (flag>=1):
            self.Danger(zomlist)
        # fire once if any live zombie occupies this lane
        for zom in zomlist:
            if (self.pos[0]!=zom.line):
                continue
            if (zom.die==True):
                continue
            # NOTE(review): the bullet x-origin uses Coordinate_origin[1];
            # Coordinate_origin[0] looks intended -- confirm against setting.py
            bulllist.append(Bullet(self.screen,self.pos[0],Coordinate_origin[1]+self.pos[1]*Block_size_width-5,1))
            self.time=pygame.time.get_ticks()
            return
    # attack check
# PeaShooter class
class WallNut():
    """Wall-nut: a high-HP blocker whose sprite set cracks as HP drops
    (Nolmal -> Cracked1 -> Cracked2)."""
    def __init__(self,scr,id,pos):
        self.sit=1
        self.fps=1
        self.pos=pos
        self.screen=scr
        self.hp=Plant_hp[id]
        self.dic=Plant_dic[id]
        self.path='Nolmal\\'
        self.pic_sum=Plant_Picsum[id]
        self.pic_pos=(Coordinate_origin[0]+Block_size_width*pos[1],Coordinate_origin[1]+Block_size_height*pos[0])
    def Draw(self):
        # animated sprite from the current damage-state folder
        img=pygame.image.load(self.dic+self.path+str(self.sit)+'.png')
        self.screen.blit(img,self.pic_pos)
        self.fps+=1
        if (self.fps>=Plant_Move_FPS):
            self.fps=1
            self.sit+=1
            if (self.sit>self.pic_sum):
                self.sit=1
    def Danger(self,zomlist):
        # NOTE(review): while being chewed this *adds* hp (slow regeneration
        # per attacker) -- verify this is intended rather than a copy-paste
        # of the shooters' timer nudge.
        if (self.hp<=0):
            return
        cnt=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                cnt+=1
        while (cnt>=0):
            self.hp+=0.002
            cnt-=1
    def Event(self,a,bulllist,zomlist):
        if (self.hp<=0):
            return
        flag=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                flag=1
        if (flag>=1):
            self.Danger(zomlist)
        # switch to a more cracked sprite set at the HP thresholds
        if (self.hp<=133 and self.path!='Cracked2\\'):
            self.path='Cracked2\\'
            self.sit=1
            self.fps=1
            self.pic_sum=15
        elif (133<self.hp<=267 and self.path!='Cracked1\\'):
            self.path='Cracked1\\'
            self.sit=1
            self.fps=1
            self.pic_sum=11
    # check HP and switch the sprite set
# WallNut class
class SnowPea():
    """Snow pea shooter: identical to PeaShooter except it fires slowing
    snow peas (Bullet id 2)."""
    def __init__(self,scr,id,pos):
        self.sit=1
        self.fps=1
        self.pos=pos
        self.cd=1400
        self.screen=scr
        self.hp=Plant_hp[id]
        self.dic=Plant_dic[id]
        self.pic_sum=Plant_Picsum[id]
        self.time=pygame.time.get_ticks()-1000
        self.pic_pos=(Coordinate_origin[0]+Block_size_width*pos[1],Coordinate_origin[1]+Block_size_height*pos[0])
    def Draw(self):
        # animated sprite: advance one frame every Plant_Move_FPS ticks
        img=pygame.image.load(self.dic+str(self.sit)+'.png')
        self.screen.blit(img,self.pic_pos)
        self.fps+=1
        if (self.fps>=Plant_Move_FPS):
            self.fps=1
            self.sit+=1
            if (self.sit>self.pic_sum):
                self.sit=1
    def Danger(self,zomlist):
        # being chewed: nudge the firing timer back per nearby attacker
        if (self.hp<=0):
            return
        cnt=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                cnt+=1
        while (cnt>=0):
            self.time-=0.02
            cnt-=1
    def Event(self,a,bulllist,zomlist):
        if (self.hp<=0):
            return
        if (pygame.time.get_ticks()-self.time <= self.cd):
            return
        flag=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                flag=1
        if (flag>=1):
            self.Danger(zomlist)
        # fire one snow pea (id 2) if any live zombie occupies this lane
        for zom in zomlist:
            if (self.pos[0]!=zom.line):
                continue
            if (zom.die==True):
                continue
            bulllist.append(Bullet(self.screen,self.pos[0],Coordinate_origin[1]+self.pos[1]*Block_size_width-5,2))
            self.time=pygame.time.get_ticks()
            return
# SnowPea: essentially the same as PeaShooter
class RepeaterPea():
    """Repeater: same as PeaShooter but fires two normal peas per volley."""
    def __init__(self,scr,id,pos):
        self.sit=1
        self.fps=1
        self.pos=pos
        self.cd=1400
        self.screen=scr
        self.hp=Plant_hp[id]
        self.dic=Plant_dic[id]
        self.pic_sum=Plant_Picsum[id]
        self.time=pygame.time.get_ticks()-1000
        self.pic_pos=(Coordinate_origin[0]+Block_size_width*pos[1],Coordinate_origin[1]+Block_size_height*pos[0])
    def Draw(self):
        # animated sprite: advance one frame every Plant_Move_FPS ticks
        img=pygame.image.load(self.dic+str(self.sit)+'.png')
        self.screen.blit(img,self.pic_pos)
        self.fps+=1
        if (self.fps>=Plant_Move_FPS):
            self.fps=1
            self.sit+=1
            if (self.sit>self.pic_sum):
                self.sit=1
    def Danger(self,zomlist):
        # being chewed: nudge the firing timer back per nearby attacker
        if (self.hp<=0):
            return
        cnt=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                cnt+=1
        while (cnt>=0):
            self.time-=0.02
            cnt-=1
    def Event(self,a,bulllist,zomlist):
        if (self.hp<=0):
            return
        if (pygame.time.get_ticks()-self.time <= self.cd):
            return
        flag=0
        for zom in zomlist:
            if (zom.hp<=0):
                continue
            if (zom.die==True):
                continue
            if (zom.pos-self.pic_pos[0]<=50):
                flag=1
        if (flag>=1):
            self.Danger(zomlist)
        # fire two peas, slightly staggered in x, if a live zombie is in lane
        for zom in zomlist:
            if (self.pos[0]!=zom.line):
                continue
            if (zom.die==True):
                continue
            bulllist.append(Bullet(self.screen,self.pos[0],Coordinate_origin[1]+self.pos[1]*Block_size_width-5,1))
            bulllist.append(Bullet(self.screen,self.pos[0],Coordinate_origin[1]+self.pos[1]*Block_size_width+25,1))
            self.time=pygame.time.get_ticks()
            return
# RepeaterPea (double shooter), same as PeaShooter
class PotatoMine():
    """Potato mine: sleeps (arming) for 15 s, then explodes on contact,
    instantly killing the triggering zombie. ``sit == -1`` marks the
    explosion animation state."""
    def __init__(self,scr,id,pos):
        self.sit=1
        self.fps=1
        self.pos=pos
        self.screen=scr
        self.hp=Plant_hp[id]
        self.dic=Plant_dic[id]
        self.pic_sum=Plant_Picsum[id]
        self.time=pygame.time.get_ticks()
        self.sleep=True
        self.pic_pos=(Coordinate_origin[0]+Block_size_width*pos[1],Coordinate_origin[1]+Block_size_height*pos[0])
    def Draw(self):
        # wake up (arm) after 15 seconds
        if (pygame.time.get_ticks()-self.time>=15000):
            self.sleep=False
        if (self.sleep==True):
            img=pygame.image.load(self.dic+'sleep\\'+str(self.sit)+'.png').convert()
            img.set_colorkey(WHITE)
            self.screen.blit(img,self.pic_pos)
            return
        if (self.sit==-1):
            # exploding: show the blast frame for 36 ticks, then remove self
            if (self.fps==36):
                self.hp=-100
                return
            img=pygame.image.load(self.dic+'1.png').convert()
            img.set_colorkey(WHITE)
            self.screen.blit(img,self.pic_pos)
            self.fps+=1
            return
        img=pygame.image.load(self.dic+'Nolmal\\'+str(self.sit)+'.png').convert()
        img.set_colorkey(WHITE)
        self.screen.blit(img,self.pic_pos)
        self.fps+=1
        if (self.fps>=Plant_Move_FPS):
            self.fps=1
            self.sit+=1
            if (self.sit>self.pic_sum):
                self.sit=1
    def Event(self,a,bulllist,zomlist):
        if (self.hp<=0):
            return
        if (self.sleep==True):
            return
        if (self.sit==-1):
            return
        # armed: detonate when a zombie in this lane steps onto the mine
        for zom in zomlist:
            if (self.pos[0]!=zom.line):
                continue
            if (zom.die==True):
                continue
            if (self.pic_pos[0]<=zom.pos and self.pic_pos[0]+5>zom.pos):
                self.fps=1
                self.sit=-1
                zom.die=True
                return
    # arming and explosion check
# PotatoMine class
class Spikeweed():
    """Spikeweed: damages every zombie walking over its tile (1 hp per
    700 ms tick). It is drawn half a cell lower, on the ground."""
    def __init__(self,scr,id,pos):
        self.sit=1
        self.fps=1
        self.pos=pos
        self.screen=scr
        self.hp=Plant_hp[id]
        self.dic=Plant_dic[id]
        self.pic_sum=Plant_Picsum[id]
        self.time=pygame.time.get_ticks()
        self.pic_pos=(Coordinate_origin[0]+Block_size_width*pos[1],Coordinate_origin[1]+Block_size_height*(pos[0]+0.5))
    def Draw(self):
        # animated sprite: advance one frame every Plant_Move_FPS ticks
        img=pygame.image.load(self.dic+str(self.sit)+'.png').convert()
        img.set_colorkey(WHITE)
        self.screen.blit(img,self.pic_pos)
        self.fps+=1
        if (self.fps>=Plant_Move_FPS):
            self.fps=1
            self.sit+=1
            if (self.sit>self.pic_sum):
                self.sit=1
    def Event(self,a,bulllist,zomlist):
        if (self.hp<=0):
            return
        # damage at most once every 700 ms
        if (pygame.time.get_ticks()-self.time<=700):
            return
        self.time=pygame.time.get_ticks()
        for zom in zomlist:
            if (self.pos[0]!=zom.line):
                continue
            if (zom.die==True):
                continue
            if (zom.pos<=self.pic_pos[0] and zom.pos>=self.pic_pos[0]-Block_size_width):
                zom.hp-=1
    # attack check; spikeweed itself cannot be eaten
# Spikeweed class
class Chomper():
    """Chomper: swallows a zombie on contact, then chews for 42 s.

    ``path`` is the current sprite-set/state: 'Nolmal' (idle),
    'Attack' (bite animation) or 'Eating' (chewing, cannot attack).
    """
    def __init__(self,scr,id,pos):
        self.sit=1
        self.fps=1
        self.pos=pos
        self.screen=scr
        self.hp=Plant_hp[id]
        self.dic=Plant_dic[id]
        self.pic_sum=Plant_Picsum[id]
        self.time=pygame.time.get_ticks()
        self.path='Nolmal'
        self.pic_pos=(Coordinate_origin[0]+Block_size_width*pos[1],Coordinate_origin[1]+Block_size_height*pos[0]-20)
    def Draw(self):
        if (self.hp<=0):
            return
        # done chewing after 42 s: back to idle
        if (self.path=='Eating' and pygame.time.get_ticks()-self.time>=42000):
            self.fps=1
            self.sit=1
            self.path='Nolmal'
        if (self.path=='Attack'):
            # bite animation plays fast (frame every 3 ticks, 9 frames),
            # then transitions into the Eating state
            img=pygame.image.load(self.dic+self.path+'\\'+str(self.sit)+'.png')
            self.screen.blit(img,self.pic_pos)
            self.fps+=1
            if (self.fps>=3):
                self.fps=1
                self.sit+=1
                if (self.sit>9):
                    self.time=pygame.time.get_ticks()
                    self.path='Eating'
                    self.fps=1
                    self.sit=1
            return
        elif (self.path=='Eating'):
            img=pygame.image.load(self.dic+self.path+'\\'+str(self.sit)+'.png')
            self.screen.blit(img,self.pic_pos)
            self.fps+=1
            if (self.fps>=Plant_Move_FPS):
                self.fps=1
                self.sit+=1
                if (self.sit>6):
                    self.sit=1
        else:
            img=pygame.image.load(self.dic+self.path+'\\'+str(self.sit)+'.png')
            self.screen.blit(img,self.pic_pos)
            self.fps+=1
            if (self.fps>=Plant_Move_FPS):
                self.fps=1
                self.sit+=1
                if (self.sit>13):
                    self.sit=1
    def Event(self,a,bulllist,zomlist):
        if (self.hp<=0):
            return
        if (self.path=='Eating'):
            return
        for zom in zomlist:
            if (self.pos[0]!=zom.line):
                continue
            if (zom.die==True):
                continue
            if (self.pic_pos[0]<=zom.pos and self.pic_pos[0]+5>zom.pos):
                if (self.path=='Attack'):
                    # the zombie is swallowed mid-bite (frame 3 onward)
                    if (self.sit>=3):
                        zom.die=True
                    return
                # start the bite animation
                self.time=pygame.time.get_ticks()
                self.path='Attack'
                self.sit=1
                self.fps=1
                return
    # attack and chewing check
# Chomper class
acde9360d483c1bc5c48537ac388285fbc6fbf34 | 2,806 | py | Python | tests/performance/timings.py | ElmaSanz/Pandas-profiling | 969c4e8ed2a3fe862462da8bbee928e045a7fea0 | [
"MIT"
] | 1 | 2021-04-27T06:28:52.000Z | 2021-04-27T06:28:52.000Z | tests/performance/timings.py | ElmaSanz/Pandas-profiling | 969c4e8ed2a3fe862462da8bbee928e045a7fea0 | [
"MIT"
] | 2 | 2021-03-29T11:37:57.000Z | 2021-03-29T11:39:35.000Z | tests/performance/timings.py | ElmaSanz/Pandas-profiling | 969c4e8ed2a3fe862462da8bbee928e045a7fea0 | [
"MIT"
] | null | null | null | import timeit
from itertools import product
from string import ascii_lowercase
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from pandas_profiling import ProfileReport
def generate_column_names(n):
    """Return at least *n* spreadsheet-style column names.

    Names are produced in whole batches: every 1-letter name, then every
    2-letter name, and so on, until the pool holds *n* or more entries.
    The complete pool (possibly longer than *n*) is returned; callers
    slice it to the length they need.
    """
    names = []
    width = 1
    while len(names) < n:
        batch = ("".join(letters) for letters in product(ascii_lowercase, repeat=width))
        names.extend(batch)
        width += 1
    return names
def make_sample_data(cols, rows):
    """Create a *rows* x *cols* DataFrame of random integers rendered as
    strings (column labels "a", "b", ..., "aa", ...)."""
    header = generate_column_names(cols)[0:cols]
    frame = pd.DataFrame(
        np.random.randint(0, 1000000, size=(rows, cols)), columns=header
    )
    frame = frame.astype(str)
    assert frame.shape == (rows, cols)
    return frame.copy()
def make_report_minimal(df):
    """Profile *df* with pandas-profiling in minimal mode and sanity-check
    the rendered HTML.

    :param df: the DataFrame to profile.
    :raises AssertionError: if the report does not render to an HTML string
        containing the expected dataset-info header.
    """
    report = ProfileReport(
        df,
        minimal=True,
        pool_size=0,
        sort="None",
        title="Dataset with <em>Numeric</em> Categories",
    )
    html = report.to_html()
    # isinstance is the idiomatic type check (type(x) == str rejects subclasses)
    assert isinstance(html, str) and '<p class="h2">Dataset info</p>' in html
def make_report(df):
    """Profile *df* with pandas-profiling using the full (non-minimal)
    configuration and sanity-check the rendered HTML.

    :param df: the DataFrame to profile.
    :raises AssertionError: if the report does not render to an HTML string
        containing the expected dataset-info header.
    """
    report = ProfileReport(
        df,
        minimal=False,
        pool_size=0,
        sort="None",
        title="Dataset with <em>Numeric</em> Categories",
    )
    html = report.to_html()
    # isinstance is the idiomatic type check (type(x) == str rejects subclasses)
    assert isinstance(html, str) and '<p class="h2">Dataset info</p>' in html
def wrap_func(function):
    """Curry *function* into a zero-argument thunk for :mod:`timeit`.

    ``wrap_func(f)(df)`` returns a callable that, when invoked with no
    arguments, evaluates ``f(df)``.
    """
    def bind(df):
        def thunk():
            return function(df)
        return thunk
    return bind
def time_report(func, cols, rows, runs=5):
    """Average wall-clock seconds of ``func(df)`` over *runs* repetitions,
    on a fresh *rows* x *cols* sample DataFrame."""
    frame = make_sample_data(cols, rows)
    print(frame.shape)
    thunk = wrap_func(func)(frame.copy())
    total = timeit.timeit(thunk, number=runs)
    return total / runs
def plot_col_run_time():
    """Plot profiling run time vs. column count (default vs. minimal mode)."""
    cols = [2, 4, 10, 50]
    row = 1000
    default_times = [time_report(make_report, col, row) for col in cols]
    minimal_times = [time_report(make_report_minimal, col, row) for col in cols]
    # NOTE(review): positional x/y arguments to seaborn.scatterplot are
    # deprecated in newer seaborn releases -- confirm the pinned version.
    ax1 = sns.scatterplot(cols, default_times)
    ax2 = sns.scatterplot(cols, minimal_times)
    _ = ax1.set(
        xlabel=f"Number of columns (row={row})",
        ylabel="time (s)",
        title="Run Time Complexity",
    )
    plt.show()
def plot_row_run_time():
    """Plot profiling run time vs. row count (default vs. minimal mode)."""
    # 10, 100
    # https://github.com/pandas-profiling/pandas-profiling/issues/270
    rows = [1000, 10000, 100000]
    col = 10
    default_times = [time_report(make_report, col, row) for row in rows]
    minimal_times = [time_report(make_report_minimal, col, row) for row in rows]
    # NOTE(review): positional x/y arguments to seaborn.scatterplot are
    # deprecated in newer seaborn releases -- confirm the pinned version.
    ax1 = sns.scatterplot(rows, default_times)
    ax2 = sns.scatterplot(rows, minimal_times)
    _ = ax1.set(
        xlabel=f"Number of rows (col={col})",
        ylabel="time (s)",
        title="Run Time Complexity",
    )
    plt.show()
# Script entry point: render both scaling plots (blocks on plt.show()).
if __name__ == "__main__":
    plot_col_run_time()
    plot_row_run_time()
| 24.614035 | 86 | 0.63685 |
acde936c5dee47fee8729f021e4ab95051c681b3 | 315 | py | Python | python/py-basic-data-types/list-comprehensions.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 41 | 2018-05-11T07:54:34.000Z | 2022-03-29T19:02:32.000Z | python/py-basic-data-types/list-comprehensions.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 2 | 2021-09-13T10:03:26.000Z | 2021-10-04T10:21:05.000Z | python/py-basic-data-types/list-comprehensions.py | PingHuskar/hackerrank | 1bfdbc63de5d0f94cd9e6ae250476b4a267662f2 | [
"Unlicense"
] | 21 | 2019-01-23T19:06:59.000Z | 2021-12-23T16:03:47.000Z | """
List Comprehensions
https://www.hackerrank.com/challenges/list-comprehensions/problem
"""
if __name__ == '__main__':
    # Read the grid bounds x, y, z and the excluded coordinate sum n
    # from stdin, one integer per line (HackerRank input format).
    x = int(input())
    y = int(input())
    z = int(input())
    n = int(input())
    # All [i, j, k] with 0<=i<=x, 0<=j<=y, 0<=k<=z whose sum is not n.
    print([[i, j, k] for i in range(x + 1) for j in range(y + 1) for k in range(z + 1) if i + j + k != n])
| 22.5 | 106 | 0.574603 |
acde9390606177ee6bd9391ac31196fd84cdceeb | 8,732 | py | Python | arraycontext/pytest.py | majosm/arraycontext | c745d2e649d274506d7f3a4acd7be7fe268263d5 | [
"MIT"
] | null | null | null | arraycontext/pytest.py | majosm/arraycontext | c745d2e649d274506d7f3a4acd7be7fe268263d5 | [
"MIT"
] | null | null | null | arraycontext/pytest.py | majosm/arraycontext | c745d2e649d274506d7f3a4acd7be7fe268263d5 | [
"MIT"
] | null | null | null | """
.. currentmodule:: arraycontext
.. autoclass:: PytestPyOpenCLArrayContextFactory
.. autofunction:: pytest_generate_tests_for_array_contexts
.. autofunction:: pytest_generate_tests_for_pyopencl_array_context
"""
__copyright__ = """
Copyright (C) 2020-1 University of Illinois Board of Trustees
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from typing import Any, Callable, Dict, Sequence, Type, Union
import pyopencl as cl
from arraycontext.context import ArrayContext
# {{{ array context factories
class PytestPyOpenCLArrayContextFactory:
    """Base class for per-device array-context factories used in pytest
    parametrization. Subclasses implement :meth:`__call__`.

    .. automethod:: __init__
    .. automethod:: __call__
    """
    def __init__(self, device):
        """
        :arg device: a :class:`pyopencl.Device`.
        """
        self.device = device
    def get_command_queue(self):
        # Get rid of leftovers from past tests.
        # CL implementations are surprisingly limited in how many
        # simultaneous contexts they allow...
        from pyopencl.tools import clear_first_arg_caches
        clear_first_arg_caches()
        from gc import collect
        collect()
        # fresh context + queue on the factory's device
        ctx = cl.Context([self.device])
        return cl.CommandQueue(ctx)
    def __call__(self) -> ArrayContext:
        raise NotImplementedError
class _PyOpenCLArrayContextFactory(PytestPyOpenCLArrayContextFactory):
    """Factory producing a :class:`PyOpenCLArrayContext` with
    ``force_device_scalars=True`` (the non-deprecated behavior)."""
    force_device_scalars = True
    def __call__(self):
        from arraycontext import PyOpenCLArrayContext
        return PyOpenCLArrayContext(
                self.get_command_queue(),
                force_device_scalars=self.force_device_scalars)
    def __str__(self):
        # used as part of the pytest parametrization id
        return ("<PyOpenCLArrayContext for <pyopencl.Device '%s' on '%s'>" %
                (self.device.name.strip(),
                 self.device.platform.name.strip()))
class _DeprecatedPyOpenCLArrayContextFactory(_PyOpenCLArrayContextFactory):
    """Same as :class:`_PyOpenCLArrayContextFactory`, but keeps the
    deprecated ``force_device_scalars=False`` behavior."""
    force_device_scalars = False
# Maps the string identifiers accepted by
# pytest_generate_tests_for_array_contexts (and the ARRAYCONTEXT_TEST
# environment variable) to their factory classes.
_ARRAY_CONTEXT_FACTORY_REGISTRY: \
        Dict[str, Type[PytestPyOpenCLArrayContextFactory]] = {
            "pyopencl": _PyOpenCLArrayContextFactory,
            "pyopencl-deprecated": _DeprecatedPyOpenCLArrayContextFactory,
            }
def register_array_context_factory(
        name: str,
        factory: Type[PytestPyOpenCLArrayContextFactory]) -> None:
    """Register *factory* under *name* for use in test parametrization.

    :raises ValueError: if *name* is already registered.
    """
    registry = _ARRAY_CONTEXT_FACTORY_REGISTRY
    if name in registry:
        raise ValueError(f"factory '{name}' already exists")
    registry[name] = factory
# }}}
# {{{ pytest integration
def pytest_generate_tests_for_array_contexts(
        factories: Sequence[Union[str, Type[PytestPyOpenCLArrayContextFactory]]], *,
        factory_arg_name: str = "actx_factory",
        ) -> Callable[[Any], None]:
    """Parametrize tests for pytest to use an :class:`~arraycontext.ArrayContext`.

    Using this function in :mod:`pytest` test scripts allows you to use the
    argument *factory_arg_name*, which is a callable that returns a
    :class:`~arraycontext.ArrayContext`. All test functions will automatically
    be run once for each implemented array context. To select specific array
    context implementations explicitly define, for example,

    .. code-block:: python

        pytest_generate_tests = pytest_generate_tests_for_array_context([
            "pyopencl",
            ])

    to use the :mod:`pyopencl`-based array context. For :mod:`pyopencl`-based
    contexts :func:`pyopencl.tools.pytest_generate_tests_for_pyopencl` is used
    as a backend, which allows specifying the ``PYOPENCL_TEST`` environment
    variable for device selection.

    The environment variable ``ARRAYCONTEXT_TEST`` can also be used to
    overwrite any chosen implementations through *factories*. This is a
    comma-separated list of known array contexts.

    Current supported implementations include:

    * ``"pyopencl"``, which creates a :class:`~arraycontext.PyOpenCLArrayContext`
      with ``force_device_scalars=True``.
    * ``"pyopencl-deprecated"``, which creates a
      :class:`~arraycontext.PyOpenCLArrayContext` with
      ``force_device_scalars=False``.

    :arg factories: a list of identifiers or
        :class:`PytestPyOpenCLArrayContextFactory` classes (not instances)
        for which to generate test fixtures.
    """
    # {{{ get all requested array context factories
    # ARRAYCONTEXT_TEST, when set, overrides the factories argument entirely
    import os
    env_factory_string = os.environ.get("ARRAYCONTEXT_TEST", None)
    if env_factory_string is not None:
        unique_factories = set(env_factory_string.split(","))
    else:
        unique_factories = set(factories)  # type: ignore[arg-type]
    if not unique_factories:
        raise ValueError("no array context factories were selected")
    # string identifiers that are not in the registry are an error
    unknown_factories = [
        factory for factory in unique_factories
        if (isinstance(factory, str)
            and factory not in _ARRAY_CONTEXT_FACTORY_REGISTRY)
        ]
    if unknown_factories:
        if env_factory_string is not None:
            raise RuntimeError(
                "unknown array context factories passed through environment "
                f"variable 'ARRAYCONTEXT_TEST': {unknown_factories}")
        else:
            raise ValueError(f"unknown array contexts: {unknown_factories}")
    # resolve string identifiers to factory classes; classes pass through
    unique_factories = set([
        _ARRAY_CONTEXT_FACTORY_REGISTRY.get(factory, factory)  # type: ignore[misc]
        for factory in unique_factories])
    # }}}
    def inner(metafunc):
        # The returned closure is assigned to pytest_generate_tests in the
        # test module; pytest calls it once per collected test function.
        # {{{ get pyopencl devices
        import pyopencl.tools as cl_tools
        arg_names = cl_tools.get_pyopencl_fixture_arg_names(
            metafunc, extra_arg_names=[factory_arg_name])
        if not arg_names:
            return
        arg_values, ids = cl_tools.get_pyopencl_fixture_arg_values()
        # }}}
        # {{{ add array context factory to arguments
        if factory_arg_name in arg_names:
            if "ctx_factory" in arg_names or "ctx_getter" in arg_names:
                raise RuntimeError(
                    f"Cannot use both an '{factory_arg_name}' and a "
                    "'ctx_factory' / 'ctx_getter' as arguments.")
            # one parametrization per (device, factory) combination
            arg_values_with_actx = []
            for arg_dict in arg_values:
                arg_values_with_actx.extend([
                    {factory_arg_name: factory(arg_dict["device"]), **arg_dict}
                    for factory in unique_factories
                    ])
        else:
            arg_values_with_actx = arg_values
        arg_value_tuples = [
                tuple(arg_dict[name] for name in arg_names)
                for arg_dict in arg_values_with_actx
                ]
        # }}}
        metafunc.parametrize(arg_names, arg_value_tuples, ids=ids)
    return inner
def pytest_generate_tests_for_pyopencl_array_context(metafunc) -> None:
    """Parametrize tests for pytest to use a
    :class:`~arraycontext.PyOpenCLArrayContext`.

    Device enumeration works the same way as in
    :func:`pyopencl.tools.pytest_generate_tests_for_pyopencl`, and the
    ``PYOPENCL_TEST`` environment variable can be used for device selection.

    Add the line

    .. code-block:: python

        from arraycontext import (
            pytest_generate_tests_for_pyopencl_array_context
            as pytest_generate_tests)

    to a pytest test script to get an ``actx_factory`` argument in the test
    functions: an argument-less callable returning an
    :class:`~arraycontext.ArrayContext`, run once per available OpenCL
    device/platform.
    """
    # Delegate to the generic machinery, pinned to the deprecated
    # (force_device_scalars=False) PyOpenCL factory for backward compatibility.
    generate = pytest_generate_tests_for_array_contexts(
        ["pyopencl-deprecated"], factory_arg_name="actx_factory")
    generate(metafunc)
# }}}
# vim: foldmethod=marker
| 33.714286 | 84 | 0.6877 |
acde941293c0f60db1099080f303b8e0668356cf | 4,027 | py | Python | pyls/uris.py | kennydo/python-language-server | f856c20f35da51e85f76b391fe4a4c7e786932fc | [
"MIT"
] | null | null | null | pyls/uris.py | kennydo/python-language-server | f856c20f35da51e85f76b391fe4a4c7e786932fc | [
"MIT"
] | null | null | null | pyls/uris.py | kennydo/python-language-server | f856c20f35da51e85f76b391fe4a4c7e786932fc | [
"MIT"
] | null | null | null | # Copyright 2017 Palantir Technologies, Inc.
"""A collection of URI utilities with logic built on the VSCode URI library.
https://github.com/Microsoft/vscode-uri/blob/e59cab84f5df6265aed18ae5f43552d3eef13bb9/lib/index.ts
"""
import os
import re
from urllib import parse
from pyls import IS_WIN
# Matches a URI path that begins with a Windows drive letter,
# e.g. the "/c:" prefix in "/c:/Users/foo".
RE_DRIVE_LETTER_PATH = re.compile(r'^\/[a-zA-Z]:')
def urlparse(uri):
    """Split *uri* into its six components and percent-decode each one.

    Returns a ``(scheme, netloc, path, params, query, fragment)`` tuple.
    """
    parts = parse.urlparse(uri)
    # ParseResult is a 6-tuple; unquote every component in order.
    return tuple(parse.unquote(part) for part in parts)
def urlunparse(parts):
    """Percent-encode the given URI parts and join them into a URI string."""
    scheme, netloc, path, params, query, fragment = parts

    # Keep the colon after a Windows drive letter ("/c:/...") unescaped.
    if RE_DRIVE_LETTER_PATH.match(path):
        encoded_path = path[:3] + parse.quote(path[3:])
    else:
        encoded_path = parse.quote(path)

    return parse.urlunparse((
        parse.quote(scheme),
        parse.quote(netloc),
        encoded_path,
        parse.quote(params),
        parse.quote(query),
        parse.quote(fragment),
    ))
def to_fs_path(uri):
    """Returns the filesystem path of the given URI.

    Handles UNC paths, normalizes Windows drive letters to lower-case and
    uses the platform specific path separator. Does *not* validate the path
    for invalid characters/semantics and does *not* look at the URI scheme.
    """
    # scheme://netloc/path;parameters?query#fragment
    scheme, netloc, path, _params, _query, _fragment = urlparse(uri)

    if netloc and path and scheme == 'file':
        # UNC share: file://shares/c$/far/boo
        fs_path = '//' + netloc + path
    elif RE_DRIVE_LETTER_PATH.match(path):
        # Windows drive letter: file:///C:/far/boo -> lower-case the letter
        fs_path = path[1].lower() + path[2:]
    else:
        fs_path = path

    # On Windows the canonical separator is a backslash.
    return fs_path.replace('/', '\\') if IS_WIN else fs_path
def from_fs_path(path):
    """Build a ``file://`` URI from the given filesystem path."""
    normalized_path, netloc = _normalize_win_path(path)
    # No params, query or fragment for a plain file URI.
    return urlunparse(('file', netloc, normalized_path, '', '', ''))
def uri_with(uri, scheme=None, netloc=None, path=None, params=None, query=None, fragment=None):
    """Return a URI with the given part(s) replaced.

    Parts are decoded / encoded. Parts left as ``None`` keep the value from
    *uri*.
    """
    old_scheme, old_netloc, old_path, old_params, old_query, old_fragment = urlparse(uri)

    if path is None:
        # Bug fix: _normalize_win_path() assumes a string and crashed with a
        # TypeError when no replacement path was supplied; fall back to the
        # original path instead.
        path = old_path
    else:
        path, _netloc = _normalize_win_path(path)

    return urlunparse((
        scheme or old_scheme,
        netloc or old_netloc,
        path,
        params or old_params,
        query or old_query,
        fragment or old_fragment
    ))
def translate_to_client_uri(local_uri):
    """Map a server-side URI onto the client's mount point.

    Relies on the SERVER_MOUNT_PATH / CLIENT_MOUNT_PATH environment
    variables being set.
    """
    server_root = os.environ['SERVER_MOUNT_PATH']
    client_root = os.environ['CLIENT_MOUNT_PATH']
    return local_uri.replace(server_root, client_root)
def translate_to_server_uri(mounted_uri):
    """Map a client-side URI back onto the server's mount point.

    Inverse of :func:`translate_to_client_uri`; relies on the
    CLIENT_MOUNT_PATH / SERVER_MOUNT_PATH environment variables.
    """
    client_root = os.environ['CLIENT_MOUNT_PATH']
    server_root = os.environ['SERVER_MOUNT_PATH']
    return mounted_uri.replace(client_root, server_root)
def _normalize_win_path(path):
    """Normalize *path* for embedding in a URI.

    Returns a ``(path, netloc)`` tuple: forward slashes on Windows, any UNC
    authority moved into *netloc*, a guaranteed leading slash, and a
    lower-cased drive letter.
    """
    netloc = ''

    # normalize to fwd-slashes on windows,
    # on other systems bwd-slashes are valid
    # filename character, eg /f\oo/ba\r.txt
    if IS_WIN:
        path = path.replace('\\', '/')

    # check for authority as used in UNC shares
    # or use the path as given
    if path[:2] == '//':
        # Bug fix: str.index() raises ValueError when no further slash
        # exists; str.find() returns -1, which is what the branch below
        # expects (ported from JS String.indexOf in the VSCode URI library).
        idx = path.find('/', 2)
        if idx == -1:
            netloc = path[2:]
        else:
            netloc = path[2:idx]
            path = path[idx:]

    # Ensure that path starts with a slash
    # or that it is at least a slash
    if not path.startswith('/'):
        path = '/' + path

    # Normalize drive paths to lower case
    if RE_DRIVE_LETTER_PATH.match(path):
        path = path[0] + path[1].lower() + path[2:]

    return path, netloc
| 27.772414 | 98 | 0.632977 |
acde9421b03d8cfa658a86400bb916fd0b40e603 | 423 | py | Python | django_channel_practice/wsgi.py | wasim2263/django-channel-practice | 3ef0db21f86ebb549a8f31b9444490145cd4b7eb | [
"MIT"
] | 1 | 2022-01-12T09:21:21.000Z | 2022-01-12T09:21:21.000Z | django_channel_practice/wsgi.py | wasim2263/django-channel-practice | 3ef0db21f86ebb549a8f31b9444490145cd4b7eb | [
"MIT"
] | null | null | null | django_channel_practice/wsgi.py | wasim2263/django-channel-practice | 3ef0db21f86ebb549a8f31b9444490145cd4b7eb | [
"MIT"
] | null | null | null | """
WSGI config for django_channel_practice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_channel_practice.settings')
application = get_wsgi_application()
| 24.882353 | 83 | 0.801418 |
acde9487bfe592deea0126f6a44a64f4f51ddb3b | 2,827 | py | Python | theano/misc/windows.py | mdda/Theano | 6ca7b2b65000e371f009b617d41bc5a90f022d38 | [
"BSD-3-Clause"
] | 295 | 2015-09-25T21:15:04.000Z | 2022-01-13T01:16:18.000Z | libs/Theano/theano/misc/windows.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 21 | 2015-10-28T19:06:32.000Z | 2022-03-11T23:13:05.000Z | libs/Theano/theano/misc/windows.py | shenshenzhanzhan/attention-lvcsr | 598d487c118e66875fdd625baa84ed29d283b800 | [
"MIT"
] | 114 | 2015-09-26T21:23:02.000Z | 2021-11-19T02:36:41.000Z | import os
import subprocess
def subprocess_Popen(command, **params):
    """
    Utility function to work around windows behavior that open windows.

    Wraps subprocess.Popen so that on Windows no console window pops up and
    a dummy stdin is supplied when the caller provides none.

    :see: call_subprocess_Popen and output_subprocess_Popen
    """
    startupinfo = None
    if os.name == 'nt':
        startupinfo = subprocess.STARTUPINFO()
        # STARTF_USESHOWWINDOW moved between Python versions; fall back to
        # the private _subprocess module when the public constant is absent.
        try:
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        except AttributeError:
            startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW

        # Anaconda for Windows does not always provide .exe files
        # in the PATH, they also have .bat files that call the corresponding
        # executable. For instance, "g++.bat" is in the PATH, not "g++.exe"
        # Unless "shell=True", "g++.bat" is not executed when trying to
        # execute "g++" without extensions.
        # (Executing "g++.bat" explicitly would also work.)
        params['shell'] = True

    # Using the dummy file descriptors below is a workaround for a
    # crash experienced in an unusual Python 2.4.4 Windows environment
    # with the default None values.
    stdin = None
    if "stdin" not in params:
        stdin = open(os.devnull)
        params['stdin'] = stdin.fileno()

    try:
        proc = subprocess.Popen(command, startupinfo=startupinfo, **params)
    finally:
        if stdin is not None:
            # NOTE(review): `del` only drops the local reference; the file is
            # closed whenever the object is garbage-collected. The child
            # already inherited the fd, so this appears intentional — confirm.
            del stdin
    return proc
def call_subprocess_Popen(command, **params):
    """
    Run *command* through subprocess_Popen, discard all of its output and
    return only the process exit code.
    """
    if 'stdout' in params or 'stderr' in params:
        raise TypeError("don't use stderr or stdout with call_subprocess_Popen")
    with open(os.devnull, 'wb') as null:
        # stdin to devnull is a workaround for a crash in a weird Windows
        # environment where sys.stdin was None
        params.setdefault('stdin', null)
        params['stdout'] = null
        params['stderr'] = null
        proc = subprocess_Popen(command, **params)
        return proc.wait()
def output_subprocess_Popen(command, **params):
    """
    Calls subprocess_Popen, returning the output, error and exit code
    in a ``(stdout, stderr, returncode)`` tuple.
    """
    if 'stdout' in params or 'stderr' in params:
        raise TypeError("don't use stderr or stdout with output_subprocess_Popen")
    # stdin to devnull is a workaround for a crash in a weird Windows
    # environement where sys.stdin was None
    # Bug fix: the original used `hasattr(params, 'stdin')`, which tests for
    # an *attribute* on the dict (always False), so a caller-supplied stdin
    # was silently overwritten. Test for the dict key instead.
    if 'stdin' not in params:
        null = open(os.devnull, 'wb')
        params['stdin'] = null
    params['stdout'] = subprocess.PIPE
    params['stderr'] = subprocess.PIPE
    p = subprocess_Popen(command, **params)
    # we need to use communicate to make sure we don't deadlock around
    # the stdout/stderr pipe.
    out = p.communicate()
    return out + (p.returncode,)
| 35.3375 | 82 | 0.653696 |
acde956103e94fa1b338d766996f66ff103f09bd | 1,162 | py | Python | mne/viz/backends/tests/_utils.py | 0reza/mne-python | da02a256423404a81929d6de278bc63d3192a280 | [
"BSD-3-Clause"
] | null | null | null | mne/viz/backends/tests/_utils.py | 0reza/mne-python | da02a256423404a81929d6de278bc63d3192a280 | [
"BSD-3-Clause"
] | null | null | null | mne/viz/backends/tests/_utils.py | 0reza/mne-python | da02a256423404a81929d6de278bc63d3192a280 | [
"BSD-3-Clause"
] | null | null | null | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import pytest
import warnings
def has_pyvista():
"""Check that PyVista is installed."""
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista # noqa: F401
return True
except ImportError:
return False
def has_pyvistaqt():
"""Check that PyVistaQt is installed."""
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvistaqt # noqa: F401
return True
except ImportError:
return False
def has_imageio_ffmpeg():
"""Check if imageio-ffmpeg is installed."""
try:
import imageio_ffmpeg # noqa: F401
return True
except ImportError:
return False
skips_if_not_pyvistaqt = pytest.mark.skipif(
not has_pyvistaqt(), reason='requires pyvistaqt')
| 25.822222 | 74 | 0.654905 |
acde9562844120c457a647b05d85edb2b5420637 | 6,609 | py | Python | evaluation/calc_errors.py | sytk/Speech_driven_gesture_generation_with_autoencoder | 61ab0f196a5e1938551e2e823a34dff057b5ddb3 | [
"Apache-2.0"
] | 65 | 2019-03-18T09:59:43.000Z | 2022-02-20T02:30:50.000Z | evaluation/calc_errors.py | sytk/Speech_driven_gesture_generation_with_autoencoder | 61ab0f196a5e1938551e2e823a34dff057b5ddb3 | [
"Apache-2.0"
] | 10 | 2019-07-12T06:46:14.000Z | 2022-01-06T07:34:37.000Z | evaluation/calc_errors.py | sytk/Speech_driven_gesture_generation_with_autoencoder | 61ab0f196a5e1938551e2e823a34dff057b5ddb3 | [
"Apache-2.0"
] | 21 | 2019-01-17T08:47:53.000Z | 2022-02-15T13:27:27.000Z | # -*- coding: utf-8 -*-
"""
Calculating average point error
@author: kaneko.naoshi
"""
import argparse
import glob
import os
import numpy as np
from sklearn.metrics import mean_absolute_error
def read_joint_names(filename):
    """Read motion capture's body joint names from file

    Args:
        filename: file name to read (comma-separated joint names)

    Returns:
        list of joint name strings
    """
    with open(filename, 'r') as name_file:
        contents = name_file.read()
    return contents.split(',')
def remove_velocity(data, dim=3):
    """Remove velocity values from raw prediction data

    The raw data interleaves position and velocity blocks of *dim* columns
    each; only the position blocks are kept.

    Args:
        data: array containing both position and velocity values
        dim: gesture dimensionality

    Returns:
        np.ndarray: array containing only position values
    """
    block = dim * 2
    starts = range(0, data.shape[1], block)
    stops = range(dim, data.shape[1], block)
    position_chunks = [data[:, lo:hi] for lo, hi in zip(starts, stops)]
    return np.hstack(position_chunks)
def MAE(original, predicted, dim=3):
    """Compute Mean Absolute Error (MAE)

    Args:
        original: array containing joint positions of original gesture
        predicted: array containing joint positions of predicted gesture
        dim: gesture dimensionality

    Returns:
        np.ndarray: MAE between original and predicted for each joint
    """
    num_frames = predicted.shape[0]

    # Per-coordinate MAE over frames. Equivalent to scikit-learn's
    # mean_absolute_error(..., multioutput='raw_values') but without the
    # heavyweight dependency, and the per-joint Python loop is replaced by
    # a single reshape/mean.
    diffs = np.mean(np.abs(original[:num_frames] - predicted), axis=0)

    # Average the per-coordinate errors within each joint (trailing columns
    # that do not fill a whole joint are dropped, as before).
    num_joints = predicted.shape[1] // dim
    return diffs[:num_joints * dim].reshape(num_joints, dim).mean(axis=1)
def APE(original, predicted, dim=3):
    """Compute Average Position Error (APE)

    Args:
        original: array containing joint positions of original gesture
        predicted: array containing joint positions of predicted gesture
        dim: gesture dimensionality

    Returns:
        np.ndarray: APE between original and predicted for each joint
    """
    num_frames = predicted.shape[0]
    num_joints = predicted.shape[1] // dim

    # Vectorized replacement for the original O(frames x joints) Python
    # double loop: reshape to (frames, joints, dim), take the Euclidean
    # norm over the coordinate axis, then average over frames.
    used = num_joints * dim
    delta = original[:num_frames, :used] - predicted[:, :used]
    per_frame = np.linalg.norm(delta.reshape(num_frames, num_joints, dim), axis=2)
    return np.mean(per_frame, axis=0)
def main():
    """CLI entry point: compare original vs. predicted gesture files and
    write per-joint error values (APE or MAE) to a CSV file."""
    # Map metric name -> implementation.
    metrics = {
        'mae': MAE,
        'ape': APE,
    }

    parser = argparse.ArgumentParser(
        description='Calculate prediction errors')
    parser.add_argument('--original', '-o', default='data/original',
                        help='Original gesture directory')
    parser.add_argument('--predicted', '-p', default='data/predicted',
                        help='Predicted gesture directory')
    parser.add_argument('--joints', '-j', default='joints.txt',
                        help='Joint name file')
    parser.add_argument('--gesture', '-g', required=True,
                        help='Directory storing predicted txt files')
    parser.add_argument('--metric', '-m', default='ape',
                        help='Error metric (ape or mae)')
    parser.add_argument('--select', '-s', nargs='+',
                        help='Joint subset to compute (if omitted, use all)')
    parser.add_argument('--out', default='result',
                        help='Directory to output the result')
    args = parser.parse_args()

    predicted_dir = os.path.join(args.predicted, args.gesture)

    # Files are paired by sorted order, so names must correspond.
    original_files = sorted(glob.glob(os.path.join(args.original, '*.txt')))
    predicted_files = sorted(glob.glob(os.path.join(predicted_dir, '*.txt')))

    # Check number of files
    if len(original_files) != len(predicted_files):
        raise ValueError('Inconsistent number of files : {} vs {}'
                         ''.format(len(original_files), len(predicted_files)))

    # Check if error metric was correct
    if args.metric not in metrics:
        raise ValueError('Unknown metric: \'{}\'. Choose from {}'
                         ''.format(args.metric, list(metrics.keys())))

    joint_names = read_joint_names(args.joints)

    # Resolve the requested joint subset into sorted indices; invalid names
    # are skipped with a warning, and an empty result falls back to all joints.
    if args.select is not None:
        selected_joints = []
        for s in args.select:
            try:
                index = joint_names.index(s)
            except ValueError:
                print('Ignore invalid joint: {}'.format(s))
            else:
                selected_joints.append(index)
        selected_joints.sort()
        if len(selected_joints) == 0:
            selected_joints = range(len(joint_names))
            print('No valid joints are selected. Use all joints')
    else:
        # Use all joints
        selected_joints = range(len(joint_names))

    joint_names = [joint_names[s] for s in selected_joints]
    # CSV header row: file name followed by the selected joint names.
    out_lines = [','.join(['file'] + joint_names) + '\n']

    errors = []
    for original_file, predicted_file in zip(original_files, predicted_files):
        original = np.loadtxt(original_file)
        predicted = np.loadtxt(predicted_file)
        if original.shape[0] != predicted.shape[0]:
            # Cut them to the same length
            length = min(original.shape[0], predicted.shape[0])
            original = original[:length]
            predicted = predicted[:length]
        # NOTE(review): 192*2 columns is taken to mean interleaved
        # position+velocity data — confirm against the prediction format.
        if predicted.shape[1] == 192 * 2:
            print(predicted.shape)
            print("Removing the velocity")
            # Remove the velocity
            predicted = remove_velocity(predicted)
        error = metrics[args.metric](original, predicted)[selected_joints]
        errors.append(error)
        basename = os.path.basename(predicted_file)
        line = basename
        for e in error:
            line += ',' + str(e)
        line += '\n'
        out_lines.append(line)

    # Final row: per-joint averages across all files.
    average_line = 'Average'
    avgs = np.mean(errors, axis=0)
    for a in avgs:
        average_line += ',' + str(a)
    out_lines.append(average_line)

    out_dir = os.path.join(args.out, args.gesture)

    # Make output directory
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    outname = os.path.join(out_dir, '{}.csv'.format(args.metric))
    with open(outname, 'w') as out_file:
        out_file.writelines(out_lines)

    print('More detailed result was writen to the file: ' + outname)
    print('')
    print('{}: {:.2f}'.format(args.metric.upper(), np.mean(errors)))
# Script entry point.
if __name__ == '__main__':
    main()
| 30.178082 | 78 | 0.592828 |
acde95a8967fca7558482574d2570d1c0857b56f | 884 | py | Python | tag/migrations/0001_initial.py | b3h3rkz/deven | 27991dfae7e6e4851622edeac5efae8a2ff11a1c | [
"MIT"
] | 1 | 2016-08-29T12:18:02.000Z | 2016-08-29T12:18:02.000Z | tag/migrations/0001_initial.py | b3h3rkz/deven | 27991dfae7e6e4851622edeac5efae8a2ff11a1c | [
"MIT"
] | null | null | null | tag/migrations/0001_initial.py | b3h3rkz/deven | 27991dfae7e6e4851622edeac5efae8a2ff11a1c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-24 19:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the "tag" app: creates the Tag
    # model (unique short name, creation timestamp, owning user).

    # First migration of this app.
    initial = True

    dependencies = [
        # The Tag.user foreign key requires the (swappable) user model first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10, unique=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
acde961958e1d7d09f595cf28c431a30a2181c70 | 10,345 | py | Python | layers/output_utils.py | bihanli/yolactBH | 756bc91c5110e532abe75729c905f70cfb988c23 | [
"MIT"
] | null | null | null | layers/output_utils.py | bihanli/yolactBH | 756bc91c5110e532abe75729c905f70cfb988c23 | [
"MIT"
] | null | null | null | layers/output_utils.py | bihanli/yolactBH | 756bc91c5110e532abe75729c905f70cfb988c23 | [
"MIT"
] | 1 | 2021-08-10T15:11:49.000Z | 2021-08-10T15:11:49.000Z | """ Contains functions used to sanitize and prepare the output of Yolact. """
import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
from PIL import Image
import matplotlib.pyplot as plt
from data import cfg, mask_type, MEANS, STD, activation_func
from utils.augmentations import Resize
from utils import timer
from .box_utils import crop, sanitize_coordinates
def postprocess(det_output, w, h, batch_idx=0, interpolation_mode='bilinear',
                visualize_lincomb=False, crop_masks=True, score_threshold=0):
    """
    Postprocesses the output of Yolact on testing mode into a format that makes sense,
    accounting for all the possible configuration settings.

    Args:
        - det_output: The lost of dicts that Detect outputs.
        - w: The real with of the image.
        - h: The real height of the image.
        - batch_idx: If you have multiple images for this batch, the image's index in the batch.
        - interpolation_mode: Can be 'nearest' | 'area' | 'bilinear' (see torch.nn.functional.interpolate)

    Returns 4 torch Tensors (in the following order):
        - classes [num_det]: The class idx for each detection.
        - scores  [num_det]: The confidence score for each detection.
        - boxes   [num_det, 4]: The bounding box for each detection in absolute point form.
        - masks   [num_det, h, w]: Full image masks for each detection.
    """
    dets = det_output[batch_idx]
    net = dets['net']
    dets = dets['detection']

    # No detections for this image: return four empty tensors.
    if dets is None:
        return [torch.Tensor()] * 4  # Warning, this is 4 copies of the same thing

    # Drop detections below the confidence threshold (keeping the shared
    # 'proto' tensor untouched).
    if score_threshold > 0:
        keep = dets['score'] > score_threshold
        for k in dets:
            if k != 'proto':
                dets[k] = dets[k][keep]
        if dets['score'].size(0) == 0:
            return [torch.Tensor()] * 4
    #visualize_lincomb=True#........................
    # Actually extract everything from dets now
    classes = dets['class']
    boxes = dets['box']
    scores = dets['score']
    masks = dets['mask']#19,32
    #for i,j in dets.items():
    #    print(i,j.shape)
    if cfg.mask_type == mask_type.lincomb and cfg.eval_mask_branch:
        # At this points masks is only the coefficients
        proto_data = dets['proto']#138,138,32

        # Test flag, do not upvote
        if cfg.mask_proto_debug:
            np.save('scripts/proto.npy', proto_data.cpu().numpy())
        #print(masks.shape)
        if visualize_lincomb:
            display_lincomb(proto_data, masks)
        #print(proto_data.shape,masks.shape)
        # Linear combination: prototypes (H, W, k) @ coefficients (k, n)
        # -> per-detection mask logits (H, W, n), then the configured
        # activation (e.g. sigmoid).
        masks = proto_data @ masks.t()#transpose
        masks = cfg.mask_proto_mask_activation(masks)
        #---------------------
        # NOTE(review): debug-only helper; defined but not called anywhere in
        # this function.
        def feature_imshow(inp, title=None):
            """Imshow for Tensor."""
            #inp = inp.detach().numpy().transpose((1, 2, 0))
            mean = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
            std = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5])
            inp = std * inp + mean
            inp = np.clip(inp, 0, 1)
            #inp*=255
            plt.pause(0.001)
            return inp
        # Commented-out thesis-figure plotting code below (kept as-is).
        """
        maskss=masks.permute(2,0,1).cpu()
        #print(maskss.shape)
        masksss=maskss.unsqueeze(1)
        ma=[]
        ma.append(masksss[0,:,:,:])
        ma.append(masksss[3,:,:,:])
        ma.append(masksss[4,:,:,:])
        ma.append(masksss[5,:,:,:])
        ma.append(masksss[7,:,:,:])
        ma.append(masksss[11,:,:,:])
        ma.append(masksss[14,:,:,:])
        ma.append(masksss[15,:,:,:])
        masksss=torch.Tensor([item.cpu().detach().numpy() for item in ma]).cuda()
        """
        #thesis,fig8
        """
        masksss = torchvision.utils.make_grid(masksss,nrow=4)
        masksss=masksss.permute(1,2,0).cpu()
        masksss=masksss[:,:,0]
        masksss=masksss.cpu().numpy()
        masksss=Image.fromarray((masksss*255).astype(np.uint16))
        plt.imshow(masksss)
        plt.axis('off')
        plt.savefig("masksss.png")
        #plt.show()
        """
        #-------------------------------
        # Crop masks before upsampling because you know why
        #print(33,masks.shape)
        if crop_masks:
            masks = crop(masks, boxes)
            #print(22,masks.shape)

        # Permute into the correct output shape [num_dets, proto_h, proto_w]
        masks = masks.permute(2, 0, 1).contiguous()
        #print(11,masks.shape)
        if cfg.use_maskiou:#T
            with timer.env('maskiou_net'):
                with torch.no_grad():
                    maskiou_p = net.maskiou_net(masks.unsqueeze(1))
                    #print(maskiou_p.shape,classes)n,13 n
                    # Pick the predicted mask IoU for each detection's class.
                    maskiou_p = torch.gather(maskiou_p, dim=1, index=classes.unsqueeze(1)).squeeze(1)
                    #print(maskiou_p,scores.shape)n,n
                    if cfg.rescore_mask:#T
                        if cfg.rescore_bbox:#F
                            scores = scores * maskiou_p
                        else:
                            # Keep both the box score and the mask-rescored score.
                            scores = [scores, scores * maskiou_p]
        #print(masks.shape)
        # Scale masks up to the full image
        masks = F.interpolate(masks.unsqueeze(0), (h, w), mode=interpolation_mode, align_corners=False).squeeze(0)
        #thesis,fig9a
        """
        #print(masks.shape)
        masks_s=masks.unsqueeze(1)
        #print(masks_s.shape)
        masks_s=torchvision.utils.make_grid(masks_s,nrow=4,padding=1)
        #print(masks_s.shape)
        masks_s=masks_s.cpu().numpy()
        masks_s=masks_s.transpose((1,2,0))
        masks_s=masks_s[:,:,0]
        #print(masks_s.shape)
        masks_s=Image.fromarray((masks_s*255).astype(np.uint16))
        plt.imshow(masks_s)
        plt.axis('off')
        plt.savefig("masks_s.png")
        #plt.show()
        """
        #thesis,fig9b
        """
        mm=masks.cpu().numpy()
        mmm=mm.sum(axis=0)
        #print(mmm.shape)
        mmm=Image.fromarray((mmm*255).astype(np.uint16))
        plt.imshow(mmm)
        plt.axis('off')
        plt.savefig("mmm.png")
        #plt.show()
        """
        # Binarize the masks
        masks.gt_(0.5)

    # Clamp box coordinates into the image and convert to integer pixels.
    boxes[:, 0], boxes[:, 2] = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, cast=False)
    boxes[:, 1], boxes[:, 3] = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, cast=False)
    boxes = boxes.long()

    if cfg.mask_type == mask_type.direct and cfg.eval_mask_branch:
        # Upscale masks
        full_masks = torch.zeros(masks.size(0), h, w)

        for jdx in range(masks.size(0)):
            x1, y1, x2, y2 = boxes[jdx, :]

            mask_w = x2 - x1
            mask_h = y2 - y1

            # Just in case
            if mask_w * mask_h <= 0 or mask_w < 0:
                continue

            mask = masks[jdx, :].view(1, 1, cfg.mask_size, cfg.mask_size)
            mask = F.interpolate(mask, (mask_h, mask_w), mode=interpolation_mode, align_corners=False)
            mask = mask.gt(0.5).float()
            full_masks[jdx, y1:y2, x1:x2] = mask

        masks = full_masks

    return classes, scores, boxes, masks
def undo_image_transformation(img, w, h):
    """
    Takes a transformed image tensor and returns a numpy ndarray that is
    untransformed. Arguments w and h are the original width and height of
    the image.
    """
    # (C, H, W) tensor -> (H, W, C) numpy array, then swap channels to BGR.
    arr = img.permute(1, 2, 0).cpu().numpy()
    arr = arr[:, :, (2, 1, 0)]  # To BGR

    # Undo whatever normalization the backbone transform applied.
    if cfg.backbone.transform.normalize:
        arr = (arr * np.array(STD) + np.array(MEANS)) / 255.0
    elif cfg.backbone.transform.subtract_means:
        arr = (arr / 255.0 + np.array(MEANS) / 255.0).astype(np.float32)

    arr = arr[:, :, (2, 1, 0)]  # To RGB
    arr = np.clip(arr, 0, 1)

    return cv2.resize(arr, (w, h))
def display_lincomb(proto_data, masks):
    """Debug visualization: for the first 16 detections, plot the prototype
    coefficients, the weighted prototypes, the running linear combination,
    and the final mask, saving each figure as a PNG in the working dir."""
    out_masks = torch.matmul(proto_data, masks.t())
    # out_masks = cfg.mask_proto_mask_activation(out_masks)

    for kdx in range(16):
        jdx = kdx
        # Coefficients for this detection, ordered by descending magnitude.
        coeffs = masks[jdx, :].cpu().numpy()
        idx = np.argsort(-np.abs(coeffs))
        #x=np.arange(-0.5,32.5,1)
        #y=[0]*33
        plt.bar(list(range(idx.shape[0])), coeffs[idx],1,edgecolor='k')
        #plt.plot(x,y,'k',linewidth=0.5)
        plt.axis([-0.5,31.5,-1,1])
        plt.savefig("bar"+str(jdx)+".png")
        #plt.show()
        coeffs_sort = coeffs[idx]
        # Tile the prototypes into a 4x8 grid image.
        arr_h, arr_w = (4,8)
        proto_h, proto_w, _ = proto_data.size()
        arr_img = np.zeros([proto_h*arr_h, proto_w*arr_w])
        arr_run = np.zeros([proto_h*arr_h, proto_w*arr_w])
        test = torch.sum(proto_data, -1).cpu().numpy()

        for y in range(arr_h):
            for x in range(arr_w):
                i = arr_w * y + x

                # Accumulate the weighted prototypes in coefficient order.
                if i == 0:
                    running_total = proto_data[:, :, idx[i]].cpu().numpy() * coeffs_sort[i]
                else:
                    running_total += proto_data[:, :, idx[i]].cpu().numpy() * coeffs_sort[i]

                running_total_nonlin = running_total
                if cfg.mask_proto_mask_activation == activation_func.sigmoid:
                    running_total_nonlin = (1/(1+np.exp(-running_total_nonlin)))

                # Left grid: each normalized prototype scaled by its coefficient.
                arr_img[y*proto_h:(y+1)*proto_h, x*proto_w:(x+1)*proto_w] = (proto_data[:, :, idx[i]] / torch.max(proto_data[:, :, idx[i]])).cpu().numpy() * coeffs_sort[i]
                # Right grid: the binarized running combination after i+1 prototypes.
                arr_run[y*proto_h:(y+1)*proto_h, x*proto_w:(x+1)*proto_w] = (running_total_nonlin > 0.5).astype(np.float)
        plt.imshow(arr_img)
        plt.axis('off')
        plt.savefig("arr_img"+str(jdx)+".png")
        #plt.show()
        plt.imshow(arr_run)
        plt.axis('off')
        plt.savefig("arr_run"+str(jdx)+".png")
        #plt.show()
        plt.imshow(test)
        plt.axis('off')
        plt.savefig("test"+str(jdx)+".png")
        #plt.show()
        plt.imshow(out_masks[:, :, jdx].cpu().numpy())
        plt.axis('off')
        plt.savefig("out_masks"+str(jdx)+".png")
        #plt.show()
| 36.426056 | 171 | 0.552151 |
acde964188c68ca66e40b25b1f56f18e300802a4 | 5,637 | py | Python | apps/games/g_2048/main.py | hpagseddy/ZPUI | b82819e523987639c2dfab417f9895d7cd7ce049 | [
"Apache-2.0",
"MIT"
] | null | null | null | apps/games/g_2048/main.py | hpagseddy/ZPUI | b82819e523987639c2dfab417f9895d7cd7ce049 | [
"Apache-2.0",
"MIT"
] | 2 | 2020-01-17T00:44:53.000Z | 2020-01-19T21:10:48.000Z | apps/games/g_2048/main.py | hpagseddy/ZPUI | b82819e523987639c2dfab417f9895d7cd7ce049 | [
"Apache-2.0",
"MIT"
] | 1 | 2020-01-14T22:44:27.000Z | 2020-01-14T22:44:27.000Z | from threading import Event, Lock
from time import sleep
from apps import ZeroApp
from ui import DialogBox, ffs
from helpers import ExitHelper, local_path_gen
from logic import GameOf2048
class GameApp(ZeroApp):
    # ZPUI app implementing the 2048 game on the character display.
    game = None           # current GameOf2048 instance; persists between activations
    do_exit = None        # threading.Event set when the app should terminate
    is_processing = None  # NOTE(review): never used in this class — confirm before removing
    menu_name = "2048"    # label shown in the ZPUI app menu

    def on_start(self):
        """App entry point: load the saved highscore, (re)start a game if
        needed and run the main loop until the user exits."""
        self.local_path = local_path_gen(__name__)
        try:
            with open(self.local_path("score.txt"), "r") as f:
                # read the highscore from score.txt (integer on first line)
                # if it doesn't exist, it's set to 0.
                prev_score = f.readline()
                if prev_score == '':
                    self.prev_score = 0
                else:
                    self.prev_score = int(prev_score)
        except IOError:
            with open(self.local_path("score.txt"), "w") as f:
                f.write('0')
            self.prev_score = 0
        self.do_exit = Event()
        # Serializes moves and the exit dialog against each other.
        self.moving = Lock()
        if self.game is None:
            # No game started yet, starting
            self.start_new_game()
        elif self.game.get_game_state() == 'lose':
            start_new = DialogBox("ync", self.i, self.o, message="Last game lost, start new?").activate()
            if start_new is None:
                return  # Picked cancel, exiting the app
            elif start_new is True:
                self.start_new_game()
        # By now, the `game` property should have a game
        # Let's launch the main loop
        while not self.do_exit.isSet():
            self.game_loop()

    def start_new_game(self):
        """Replace the current game with a fresh 4x4 board."""
        self.game = GameOf2048(4, 4)

    def set_keymap(self):
        """Bind arrow keys to moves and ENTER to the exit dialog."""
        keymap = {"KEY_LEFT": lambda: self.make_a_move("left"),
                  "KEY_RIGHT": lambda: self.make_a_move("right"),
                  "KEY_UP": lambda: self.make_a_move("up"),
                  "KEY_DOWN": lambda: self.make_a_move("down"),
                  "KEY_ENTER": self.confirm_exit}
        self.i.stop_listen()
        self.i.set_keymap(keymap)
        self.i.listen()

    def confirm_exit(self):
        """Show an exit/restart dialog; save the score on exit or restart."""
        with self.moving:
            # Default to "no" while a game is in progress, "yes" otherwise.
            if self.game.get_game_state() == 'not over':
                choices = ["n", ["Restart", "restart"], "y"]
            else:
                choices = ["y", ["Restart", "restart"], "n"]
            choice = DialogBox(choices, self.i, self.o, message="Exit the game?").activate()
            if choice == "restart":
                self.write_score()  # write score if user restarts
                self.start_new_game()
                self.set_keymap()
                self.refresh()
            elif choice is True:
                self.write_score()  # write score if user exits
                self.do_exit.set()
            else:
                self.set_keymap()
                self.refresh()

    def make_a_move(self, direction):
        """Apply one move ("up"/"down"/"left"/"right") and redraw the board."""
        with self.moving:
            assert (direction in ["up", "down", "left", "right"])
            getattr(self.game, direction)()
            self.refresh()

    def game_loop(self):
        """Run one game until it is won/lost or the user exits, then offer
        to restart."""
        self.set_keymap()
        self.refresh()
        # Key presses drive the game via the keymap; just wait here.
        while self.game.get_game_state() == 'not over' and not self.do_exit.isSet():
            sleep(1)
        if self.do_exit.isSet():
            self.write_score()
            return
        # Waiting for player to click any of five primary keys
        # Then, prompting to restart the game
        eh = ExitHelper(self.i, keys=self.i.reserved_keys).start()
        while eh.do_run():
            sleep(0.1)
        do_restart = DialogBox("ync", self.i, self.o, message="Restart the game?").activate()
        if do_restart is None:  # Cancel, leaving the playing field as-is
            return
        elif do_restart is False:  # No, not restarting, thus exiting the game
            self.write_score()  # write score if user exits
            self.do_exit.set()
        else:
            self.write_score()  # write score if user restarts
            self.start_new_game()  # Yes, restarting (game_loop will be entered once more from on_start() )

    def display_field(self, field):
        """Render the 4x4 field into a list of display rows, with the game
        state in the middle and "score - highscore" in the footer."""
        assert len(field) == 4, "Can't display a field that's not 4x4!"
        assert len(field[0]) == 4, "Can't display a field that's not 4x4!"
        display_data = []
        # NOTE(review): "/" yields a float on Python 3 and str.center()
        # requires an int — this line looks Python-2-only; confirm ("//").
        space_for_each_number = self.o.cols / len(field[0])
        for field_row in field:
            field_row_str = [str(i) if i else "." for i in field_row]
            display_row = "".join(str(i).center(space_for_each_number) for i in field_row_str)
            display_data.append(display_row.ljust(self.o.cols))
            # NOTE(review): "" * n is always "" — presumably meant as a blank
            # spacer row; confirm whether " " * cols was intended.
            display_data.append("" * self.o.cols)
        # Replacing the center row with the game state, if applicable
        game_state = self.game.get_game_state()
        state_str = {"win": "You won!",
                     "lose": "You lost!",
                     "not over": ""}[game_state]
        display_data[3] = state_str.center(self.o.cols)
        # Footer - score
        display_data[7] = str(str(self.game.score) + " - " + str(self.prev_score)).center(self.o.cols)
        return display_data

    def refresh(self):
        """Redraw the current field on the output device."""
        displayed_field = self.display_field(self.game.get_field())
        self.o.display_data(*displayed_field)

    def write_score(self):
        # overwrite score file if current score is higher than the previous highscore
        # the previous highscore is determined in on_start()
        if self.game.score > self.prev_score:
            with open(self.local_path("score.txt"), "w") as f:
                f.write(str(self.game.score))
            self.prev_score = self.game.score
| 39.697183 | 107 | 0.565904 |
acde989a61f71159a90daeb1f420b11c4d5a7979 | 6,794 | py | Python | Kai/crab/NANOv7_NoveCampaign/2017/crab_script_2017_Mu_E.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | 1 | 2022-01-17T17:29:38.000Z | 2022-01-17T17:29:38.000Z | Kai/crab/NANOv7_NoveCampaign/2017/crab_script_2017_Mu_E.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | null | null | null | Kai/crab/NANOv7_NoveCampaign/2017/crab_script_2017_Mu_E.py | NJManganelli/FourTopNAOD | 9743d5b49bdbad27a74abb7b2d5b7295f678a0e3 | [
"Apache-2.0"
] | 1 | 2021-12-15T10:56:50.000Z | 2021-12-15T10:56:50.000Z | #!/usr/bin/env python
import os, time, collections, copy, json, multiprocessing
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import *
from PhysicsTools.NanoAODTools.postprocessing.framework.crabhelper import inputFiles,runsAndLumis
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.btv.btagSFProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetmetHelperRun2 import *
from FourTopNAOD.Kai.modules.LeptonSkimmer import *
from FourTopNAOD.Kai.modules.JetMETSkimmer import *
# --- Per-dataset job configuration (filled in by the CRAB submission tooling) ---
isData = True  # this job runs over collision data, not simulation
isUltraLegacy = False  # EOY ("non-UL") reconstruction campaign
era = "2017"
subera = "E"  # data-taking period within the era; selects run-period JECs below
thePreselection = None  # no preselection cut string passed to the PostProcessor
crossSection = None  # MC-only bookkeeping; unused for data
equivLumi = 41.53  # 2017 integrated luminosity in /fb (MC normalization)
nEventsPositive = None
nEventsNegative = None
sumWeights = None
TriggerChannel = "Mu"  # single-muon trigger stream
JESUnc = "Merged" # options: "All", "Merged", "Total"
theFiles = inputFiles()  # input file list supplied by the CRAB helper
# Certified-luminosity ("Golden") JSON file names, keyed by era and campaign.
GoldenJSON = {"2016": {"non-UL": "Cert_271036-284044_13TeV_ReReco_07Aug2017_Collisions16_JSON.txt",
                       "UL": "Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt"
                   },
              "2017": {"non-UL": "Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON_v1.txt",
                       "UL": "Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt"
                   },
              "2018": {"non-UL": "Cert_314472-325175_13TeV_17SeptEarlyReReco2018ABC_PromptEraD_Collisions18_JSON.txt",
                       "UL": "Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt"
                   }
          }
if isData:
    # Resolve the Golden JSON path on the worker node; depending on how the
    # CMSSW area was built, the jsons package may sit under python/ or src/.
    theLumis = os.path.join(os.environ["CMSSW_BASE"], "python/FourTopNAOD/Kai/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
    print("Loading Golden Json: {}".format(theLumis))
    if not os.path.isfile(theLumis):
        theLumis = os.path.join(os.environ["CMSSW_BASE"], "src/FourTopNAOD/Kai/python/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
        if not os.path.isfile(theLumis):
            raise RuntimeError("Valid GoldenJSON file not found, if running on CRAB try a new scram build before resubmitting")
else:
    # MC: no certified-luminosity mask is applied.
    theLumis = None
# Build the list of NanoAOD-tools modules to run, in order.
moduleCache = []
if not isData:
    # MC-only corrections: pileup reweighting per era.
    if era == "2016":
        moduleCache.append(puWeight_2016())
    elif era == "2017":
        moduleCache.append(puWeight_2017())
    elif era == "2018":
        moduleCache.append(puWeight_2018())
    else:
        raise RuntimeError("Unexpected era identifier {}".format(era))
    if JESUnc in ["All", "Merged"]: #btag POG provides all JEC unc sources, except for RelativeSample
        btagjes_sources = ['jes', 'jesAbsoluteMPFBias', 'jesAbsoluteScale', 'jesAbsoluteStat', 'jesFlavorQCD', 'jesFragmentation', 'jesPileUpDataMC', 'jesPileUpPtBB', 'jesPileUpPtEC1', 'jesPileUpPtEC2', 'jesPileUpPtHF', 'jesPileUpPtRef', 'jesRelativeBal', 'jesRelativeFSR', 'jesRelativeJEREC1', 'jesRelativeJEREC2', 'jesRelativeJERHF', 'jesRelativePtBB', 'jesRelativePtEC1', 'jesRelativePtEC2', 'jesRelativePtHF', 'jesRelativeStatEC', 'jesRelativeStatFSR', 'jesRelativeStatHF', 'jesSinglePionECAL', 'jesSinglePionHCAL', 'jesTimePtEta']
        # if JESUnc == "Merged": #no btag shape unc for regrouped JEC available, so use the total one ("jes") and the remaining single ones that are not grouped (see also: https://docs.google.com/spreadsheets/d/1Feuj1n0MdotcPq19Mht7SUIgvkXkA4hiB0BxEuBShLw/edit#gid=1345121349)
        #     btagjes_sources = ['jes', 'jesFlavorQCD','jesPileUpPtEC2', 'jesRelativeBal']
    else:
        btagjes_sources = ['jes']
    # b-tag scale factors for both taggers (working-point and shape SFs).
    moduleCache.append(btagSFProducer(era,
                                      algo="deepjet",
                                      selectedWPs=['M', 'shape_corr'],
                                      sfFileName=None, #Automatically deduced
                                      verbose=0,
                                      jesSystsForShape=btagjes_sources
                                  )
                   )
    moduleCache.append(btagSFProducer(era,
                                      algo="deepcsv",
                                      selectedWPs=['M', 'shape_corr'],
                                      sfFileName=None, #Automatically deduced
                                      verbose=0,
                                      jesSystsForShape=btagjes_sources
                                  )
                   )
#Need to make it into a function, so extra () pair...
jmeModule = createJMECorrector(isMC=(not isData),
                               dataYear=int(era),
                               runPeriod=subera if isData else None,
                               jesUncert=JESUnc,
                               jetType="AK4PFchs",
                               noGroom=False,
                               metBranchName="METFixEE2017" if era == "2017" else "MET",
                               applySmearing=True,
                               isFastSim=False,
                               applyHEMfix=True if era == "2018" and isUltraLegacy else False,
                               splitJER=False,
                               saveMETUncs=['T1', 'T1Smear']
                           )
moduleCache.append(jmeModule())
# Event selection: trigger + lepton flags, then a loose jet/HT skim.
moduleCache.append(TriggerAndLeptonSkimmer('baseline',
                                           era=era,
                                           subera=subera,
                                           isData=isData,
                                           TriggerChannel=TriggerChannel,
                                           fillHists=False,
                                           mode="Flag",
                                       )
               )
moduleCache.append(JetMETSkimmer(jetMinPt=20.0,
                                 jetMaxEta=2.4 if era == "2016" else 2.5,
                                 jetMinID=0b010,
                                 jetMinCount=4,
                                 minPseudoHT=350,
                                 fillHists=False
                             )
               )
# Run the NanoAOD post-processor over the CRAB-supplied inputs, applying the
# lumi mask (data only) and writing the merged skim to tree.root.
p=PostProcessor(".",
                theFiles,
                modules=moduleCache,
                cut=thePreselection,
                provenance=True,
                fwkJobReport=True,  # required so CRAB can pick up the job report
                jsonInput=theLumis,
                histFileName="hist.root",
                histDirName="plots",
                branchsel=None,
                outputbranchsel=None,
                compression="LZMA:9",
                friend=False,
                postfix=None,
                noOut=False,
                justcount=False,
                haddFileName="tree.root",
                maxEntries=None,
                firstEntry=0,
                prefetch=True,
                longTermCache=False
                )
p.run()
acde989e3f6824e59a07295d19ec43da03641f30 | 64,074 | py | Python | tests/serialization/test_dag_serialization.py | danneaves-ee/airflow | 077bacd9e7cc066365d3f201be2a5d9b108350fb | [
"Apache-2.0"
] | 3 | 2021-07-30T16:44:13.000Z | 2021-08-03T13:52:09.000Z | tests/serialization/test_dag_serialization.py | danneaves-ee/airflow | 077bacd9e7cc066365d3f201be2a5d9b108350fb | [
"Apache-2.0"
] | 9 | 2022-01-20T11:48:35.000Z | 2022-03-21T19:50:14.000Z | tests/serialization/test_dag_serialization.py | danneaves-ee/airflow | 077bacd9e7cc066365d3f201be2a5d9b108350fb | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import copy
import importlib
import importlib.util
import json
import multiprocessing
import os
from datetime import datetime, timedelta
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from airflow.exceptions import SerializationError
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.param import Param, ParamsDict
from airflow.models.xcom import XCom
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import (
SerializedBaseOperator,
SerializedDAG,
SerializedTaskGroup,
)
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.utils import timezone
from airflow.utils.context import Context
from airflow.utils.task_group import TaskGroup
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
from tests.test_utils.timetables import CustomSerializationTimetable, cron_timetable, delta_timetable
# Pod spec used as the "pod_override" executor_config on the serialized
# bash_task; its serialized form appears in the ground-truth dict below.
executor_config_pod = k8s.V1Pod(
    metadata=k8s.V1ObjectMeta(name="my-name"),
    spec=k8s.V1PodSpec(
        containers=[
            k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
        ]
    ),
)
# Expected serialized form of make_simple_dag()'s DAG; compared (after
# sorting tasks/permissions) in test_serialization and related tests.
serialized_simple_dag_ground_truth = {
    "__version": 1,
    "dag": {
        "default_args": {
            "__type": "dict",
            "__var": {
                "depends_on_past": False,
                "retries": 1,
                "retry_delay": {"__type": "timedelta", "__var": 300.0},
                "max_retry_delay": {"__type": "timedelta", "__var": 600.0},
                "sla": {"__type": "timedelta", "__var": 100.0},
            },
        },
        "start_date": 1564617600.0,
        '_task_group': {
            '_group_id': None,
            'prefix_group_id': True,
            'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
            'tooltip': '',
            'ui_color': 'CornflowerBlue',
            'ui_fgcolor': '#000',
            'upstream_group_ids': [],
            'downstream_group_ids': [],
            'upstream_task_ids': [],
            'downstream_task_ids': [],
        },
        "is_paused_upon_creation": False,
        "_dag_id": "simple_dag",
        "doc_md": "### DAG Tutorial Documentation",
        "fileloc": None,
        "tasks": [
            {
                "task_id": "bash_task",
                "owner": "airflow",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "ui_color": "#f0ede4",
                "ui_fgcolor": "#000",
                "template_ext": ['.sh', '.bash'],
                "template_fields": ['bash_command', 'env'],
                "template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
                "bash_command": "echo {{ task.task_id }}",
                "_task_type": "BashOperator",
                "_task_module": "airflow.operators.bash",
                "pool": "default_pool",
                "executor_config": {
                    '__type': 'dict',
                    '__var': {
                        "pod_override": {
                            '__type': 'k8s.V1Pod',
                            '__var': PodGenerator.serialize_pod(executor_config_pod),
                        }
                    },
                },
                "doc_md": "### Task Tutorial Documentation",
            },
            {
                "task_id": "custom_task",
                "retries": 1,
                "retry_delay": 300.0,
                "max_retry_delay": 600.0,
                "sla": 100.0,
                "downstream_task_ids": [],
                "_inlets": [],
                "_is_dummy": False,
                "_outlets": [],
                "_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
                "ui_color": "#fff",
                "ui_fgcolor": "#000",
                "template_ext": [],
                "template_fields": ['bash_command'],
                "template_fields_renderers": {},
                "_task_type": "CustomOperator",
                "_task_module": "tests.test_utils.mock_operators",
                "pool": "default_pool",
            },
        ],
        "schedule_interval": {"__type": "timedelta", "__var": 86400.0},
        "timezone": "UTC",
        "_access_control": {
            "__type": "dict",
            "__var": {
                "test_role": {
                    "__type": "set",
                    "__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
                }
            },
        },
        "edge_info": {},
        "dag_dependencies": [],
        "params": {},
    },
}
# Repository root: two directories up from this file (tests/serialization/).
ROOT_FOLDER = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)

# Expected serialized form of CustomSerializationTimetable("foo").
CUSTOM_TIMETABLE_SERIALIZED = {
    "__type": "tests.test_utils.timetables.CustomSerializationTimetable",
    "__var": {"value": "foo"},
}
def make_example_dags(module_path):
    """Load and return the DAGs found under *module_path*."""
    return DagBag(module_path).dags
def make_simple_dag():
    """Make very simple DAG to verify serialization result."""
    dag = DAG(
        dag_id='simple_dag',
        default_args={
            "retries": 1,
            "retry_delay": timedelta(minutes=5),
            "max_retry_delay": timedelta(minutes=10),
            "depends_on_past": False,
            "sla": timedelta(seconds=100),
        },
        start_date=datetime(2019, 8, 1),
        is_paused_upon_creation=False,
        access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
        doc_md="### DAG Tutorial Documentation",
    )
    # Attach tasks explicitly instead of via the DAG context manager.
    CustomOperator(task_id='custom_task', dag=dag)
    BashOperator(
        task_id='bash_task',
        bash_command='echo {{ task.task_id }}',
        owner='airflow',
        executor_config={"pod_override": executor_config_pod},
        doc_md="### Task Tutorial Documentation",
        dag=dag,
    )
    return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
    """Make DAGs with user defined macros and filters using locally defined methods.

    For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.

    The examples here test:
        (1) functions can be successfully displayed on UI;
        (2) templates with function macros have been rendered before serialization.
    """

    def compute_next_execution_date(dag, execution_date):
        return dag.following_schedule(execution_date)

    dag = DAG(
        'user_defined_macro_filter_dag',
        default_args={'start_date': datetime(2019, 7, 10)},
        user_defined_macros={'next_execution_date': compute_next_execution_date},
        user_defined_filters={'hello': lambda name: f'Hello {name}'},
        catchup=False,
    )
    BashOperator(
        task_id='echo',
        bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
        dag=dag,
    )
    return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
    """Collects DAGs to test."""
    dags = {}
    dags.update(make_simple_dag())
    dags.update(make_user_defined_macro_filter_dag())

    # A falsy dag_folder (None, "", []) means "use the default search paths".
    if dag_folder:
        patterns = list(dag_folder) if isinstance(dag_folder, (list, tuple)) else [dag_folder]
    else:
        patterns = [
            "airflow/example_dags",
            "airflow/providers/*/example_dags",
            "airflow/providers/*/*/example_dags",
        ]
    for pattern in patterns:
        for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
            dags.update(make_example_dags(directory))

    # Subdags are stored in the same Serialized DAG table row, so drop them.
    return {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
def get_timetable_based_simple_dag(timetable):
    """Create a simple_dag variant that uses timetable instead of schedule_interval."""
    simple_dag = collect_dags(["airflow/example_dags"])["simple_dag"]
    simple_dag.timetable = timetable
    simple_dag.schedule_interval = timetable.summary
    return simple_dag
def serialize_subprocess(queue, dag_folder):
    """Validate pickle in a subprocess."""
    for dag in collect_dags(dag_folder).values():
        queue.put(SerializedDAG.to_json(dag))
    # Sentinel telling the consumer that all DAGs have been sent.
    queue.put(None)
@pytest.fixture()
def timetable_plugin(monkeypatch):
    """Patch plugins manager to always and only return our custom timetable."""
    from airflow import plugins_manager

    # Disable real plugin discovery and inject the single test timetable class.
    monkeypatch.setattr(plugins_manager, "initialize_timetables_plugins", lambda: None)
    monkeypatch.setattr(
        plugins_manager,
        "timetable_classes",
        {"tests.test_utils.timetables.CustomSerializationTimetable": CustomSerializationTimetable},
    )
class TestStringifiedDAGs:
"""Unit tests for stringified DAGs."""
    def setup_method(self):
        # Stub out connection lookup so example DAGs parse without a real
        # metadata DB; the original is restored in teardown_method().
        self.backup_base_hook_get_connection = BaseHook.get_connection
        BaseHook.get_connection = mock.Mock(
            return_value=Connection(
                extra=(
                    '{'
                    '"project_id": "mock", '
                    '"location": "mock", '
                    '"instance": "mock", '
                    '"database_type": "postgres", '
                    '"use_proxy": "False", '
                    '"use_ssl": "False"'
                    '}'
                )
            )
        )
        self.maxDiff = None
    def teardown_method(self):
        # Undo the BaseHook.get_connection patch applied in setup_method().
        BaseHook.get_connection = self.backup_base_hook_get_connection
    def test_serialization(self):
        """Serialization and deserialization should work for every DAG and Operator."""
        dags = collect_dags()
        serialized_dags = {}
        for _, v in dags.items():
            # Every collected DAG must pass JSON-schema validation.
            dag = SerializedDAG.to_dict(v)
            SerializedDAG.validate_schema(dag)
            serialized_dags[v.dag_id] = dag

        # Compares with the ground truth of JSON string.
        self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
    @pytest.mark.parametrize(
        "timetable, serialized_timetable",
        [
            (
                cron_timetable("0 0 * * *"),
                {
                    "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                    "__var": {"expression": "0 0 * * *", "timezone": "UTC"},
                },
            ),
            (
                CustomSerializationTimetable("foo"),
                CUSTOM_TIMETABLE_SERIALIZED,
            ),
        ],
    )
    @pytest.mark.usefixtures("timetable_plugin")
    def test_dag_serialization_to_timetable(self, timetable, serialized_timetable):
        """Verify a timetable-backed schedule_interval is excluded in serialization."""
        dag = get_timetable_based_simple_dag(timetable)
        serialized_dag = SerializedDAG.to_dict(dag)
        SerializedDAG.validate_schema(serialized_dag)

        # Ground truth: "schedule_interval" is replaced by a "timetable" entry.
        expected = copy.deepcopy(serialized_simple_dag_ground_truth)
        del expected["dag"]["schedule_interval"]
        expected["dag"]["timetable"] = serialized_timetable
        self.validate_serialized_dag(serialized_dag, expected)
    def test_dag_serialization_unregistered_custom_timetable(self):
        """Verify serialization fails without timetable registration."""
        dag = get_timetable_based_simple_dag(CustomSerializationTimetable("bar"))
        with pytest.raises(SerializationError) as ctx:
            SerializedDAG.to_dict(dag)

        # The error must name the offending timetable class.
        message = (
            "Failed to serialize DAG 'simple_dag': Timetable class "
            "'tests.test_utils.timetables.CustomSerializationTimetable' "
            "is not registered"
        )
        assert str(ctx.value) == message
    def validate_serialized_dag(self, json_dag, ground_truth_dag):
        """Assert *json_dag* equals *ground_truth_dag*, ignoring ordering and the fileloc path."""
        # Only the basename of fileloc is stable across checkouts.
        assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
        json_dag['dag']['fileloc'] = None

        def sorted_serialized_dag(dag_dict: dict):
            """
            Sorts the "tasks" list and "access_control" permissions in the
            serialised dag python dictionary. This is needed as the order of
            items should not matter but assertEqual would fail if the order of
            items changes in the dag dictionary
            """
            dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
            dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
                dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
            )
            return dag_dict

        assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
    def test_deserialization_across_process(self):
        """A serialized DAG can be deserialized in another process."""

        # Since we need to parse the dags twice here (once in the subprocess,
        # and once here to get a DAG to compare to) we don't want to load all
        # dags.
        queue = multiprocessing.Queue()
        proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
        proc.daemon = True
        proc.start()

        stringified_dags = {}
        # serialize_subprocess puts a trailing None as an end-of-stream sentinel.
        while True:
            v = queue.get()
            if v is None:
                break
            dag = SerializedDAG.from_json(v)
            assert isinstance(dag, DAG)
            stringified_dags[dag.dag_id] = dag

        dags = collect_dags("airflow/example_dags")
        assert set(stringified_dags.keys()) == set(dags.keys())

        # Verify deserialized DAGs.
        for dag_id in stringified_dags:
            self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
    def test_roundtrip_provider_example_dags(self):
        # All provider example DAGs must survive a serialize/deserialize round trip.
        dags = collect_dags(
            [
                "airflow/providers/*/example_dags",
                "airflow/providers/*/*/example_dags",
            ]
        )

        # Verify deserialized DAGs.
        for dag in dags.values():
            serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
            self.validate_deserialized_dag(serialized_dag, dag)
    @pytest.mark.parametrize(
        "timetable",
        [cron_timetable("0 0 * * *"), CustomSerializationTimetable("foo")],
    )
    @pytest.mark.usefixtures("timetable_plugin")
    def test_dag_roundtrip_from_timetable(self, timetable):
        """Verify a timetable-backed serialization can be deserialized."""
        dag = get_timetable_based_simple_dag(timetable)
        roundtripped = SerializedDAG.from_json(SerializedDAG.to_json(dag))
        self.validate_deserialized_dag(roundtripped, dag)
    def validate_deserialized_dag(self, serialized_dag, dag):
        """
        Verify that all example DAGs work with DAG Serialization by
        checking fields between Serialized Dags & non-Serialized Dags
        """
        fields_to_check = dag.get_serialized_fields() - {
            # Doesn't implement __eq__ properly. Check manually.
            'timetable',
            'timezone',
            # Need to check fields in it, to exclude functions.
            'default_args',
            "_task_group",
            'params',
        }
        for field in fields_to_check:
            assert getattr(serialized_dag, field) == getattr(
                dag, field
            ), f'{dag.dag_id}.{field} does not match'

        if dag.default_args:
            for k, v in dag.default_args.items():
                if callable(v):
                    # Check we stored _something_.
                    assert k in serialized_dag.default_args
                else:
                    assert (
                        v == serialized_dag.default_args[k]
                    ), f'{dag.dag_id}.default_args[{k}] does not match'

        # Timetables compare via their serialized form; timezones via name.
        assert serialized_dag.timetable.summary == dag.timetable.summary
        assert serialized_dag.timetable.serialize() == dag.timetable.serialize()
        assert serialized_dag.timezone.name == dag.timezone.name

        for task_id in dag.task_ids:
            self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
    def validate_deserialized_task(
        self,
        serialized_task,
        task,
    ):
        """Verify non-airflow operators are casted to BaseOperator."""
        assert isinstance(serialized_task, SerializedBaseOperator)
        assert not isinstance(task, SerializedBaseOperator)
        assert isinstance(task, BaseOperator)

        fields_to_check = task.get_serialized_fields() - {
            # Checked separately
            '_task_type',
            'subdag',
            # Type is excluded, so don't check it
            '_log',
            # List vs tuple. Check separately
            'template_ext',
            'template_fields',
            # We store the string, real dag has the actual code
            'on_failure_callback',
            'on_success_callback',
            'on_retry_callback',
            # Checked separately
            'resources',
            'params',
        }

        assert serialized_task.task_type == task.task_type
        assert set(serialized_task.template_ext) == set(task.template_ext)
        assert set(serialized_task.template_fields) == set(task.template_fields)

        assert serialized_task.upstream_task_ids == task.upstream_task_ids
        assert serialized_task.downstream_task_ids == task.downstream_task_ids

        for field in fields_to_check:
            assert getattr(serialized_task, field) == getattr(
                task, field
            ), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'

        # Original resources may be None or [] depending on the operator.
        if serialized_task.resources is None:
            assert task.resources is None or task.resources == []
        else:
            assert serialized_task.resources == task.resources

        # Ugly hack as some operators override params var in their init
        if isinstance(task.params, ParamsDict):
            assert serialized_task.params.dump() == task.params.dump()

        # Check that for Deserialized task, task.subdag is None for all other Operators
        # except for the SubDagOperator where task.subdag is an instance of DAG object
        if task.task_type == "SubDagOperator":
            assert serialized_task.subdag is not None
            assert isinstance(serialized_task.subdag, DAG)
        else:
            assert serialized_task.subdag is None
    @pytest.mark.parametrize(
        "dag_start_date, task_start_date, expected_task_start_date",
        [
            (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
            ),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
                datetime(2019, 8, 1, tzinfo=timezone.utc),
            ),
            (pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
        ],
    )
    def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
        # The effective task start_date is max(dag.start_date, task.start_date).
        dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
        BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)

        serialized_dag = SerializedDAG.to_dict(dag)
        if not task_start_date or dag_start_date >= task_start_date:
            # If dag.start_date > task.start_date -> task.start_date=dag.start_date
            # because of the logic in dag.add_task()
            assert "start_date" not in serialized_dag["dag"]["tasks"][0]
        else:
            assert "start_date" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert simple_task.start_date == expected_task_start_date
    def test_deserialization_with_dag_context(self):
        # Serializing while the DAG context manager is still active must work.
        with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
            BaseOperator(task_id='simple_task')
            # should not raise RuntimeError: dictionary changed size during iteration
            SerializedDAG.to_dict(dag)
    @pytest.mark.parametrize(
        "dag_end_date, task_end_date, expected_task_end_date",
        [
            (datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 8, 2, tzinfo=timezone.utc),
                datetime(2019, 8, 1, tzinfo=timezone.utc),
            ),
            (
                datetime(2019, 8, 1, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
                datetime(2019, 7, 30, tzinfo=timezone.utc),
            ),
        ],
    )
    def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
        # The effective task end_date is min(dag.end_date, task.end_date).
        dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
        BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)

        serialized_dag = SerializedDAG.to_dict(dag)
        if not task_end_date or dag_end_date <= task_end_date:
            # If dag.end_date < task.end_date -> task.end_date=dag.end_date
            # because of the logic in dag.add_task()
            assert "end_date" not in serialized_dag["dag"]["tasks"][0]
        else:
            assert "end_date" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert simple_task.end_date == expected_task_end_date
    @pytest.mark.parametrize(
        "serialized_timetable, expected_timetable",
        [
            ({"__type": "airflow.timetables.simple.NullTimetable", "__var": {}}, NullTimetable()),
            (
                {
                    "__type": "airflow.timetables.interval.CronDataIntervalTimetable",
                    "__var": {"expression": "@weekly", "timezone": "UTC"},
                },
                cron_timetable("0 0 * * 0"),
            ),
            ({"__type": "airflow.timetables.simple.OnceTimetable", "__var": {}}, OnceTimetable()),
            (
                {
                    "__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
                    "__var": {"delta": 86400.0},
                },
                delta_timetable(timedelta(days=1)),
            ),
            (CUSTOM_TIMETABLE_SERIALIZED, CustomSerializationTimetable("foo")),
        ],
    )
    @pytest.mark.usefixtures("timetable_plugin")
    def test_deserialization_timetable(
        self,
        serialized_timetable,
        expected_timetable,
    ):
        # A hand-built serialized payload with "timetable" must deserialize
        # into the matching timetable instance.
        serialized = {
            "__version": 1,
            "dag": {
                "default_args": {"__type": "dict", "__var": {}},
                "_dag_id": "simple_dag",
                "fileloc": __file__,
                "tasks": [],
                "timezone": "UTC",
                "timetable": serialized_timetable,
            },
        }
        SerializedDAG.validate_schema(serialized)
        dag = SerializedDAG.from_dict(serialized)
        assert dag.timetable == expected_timetable
    def test_deserialization_timetable_unregistered(self):
        # Deserializing an unregistered timetable class must raise with a
        # message naming the class.
        serialized = {
            "__version": 1,
            "dag": {
                "default_args": {"__type": "dict", "__var": {}},
                "_dag_id": "simple_dag",
                "fileloc": __file__,
                "tasks": [],
                "timezone": "UTC",
                "timetable": CUSTOM_TIMETABLE_SERIALIZED,
            },
        }
        SerializedDAG.validate_schema(serialized)
        with pytest.raises(ValueError) as ctx:
            SerializedDAG.from_dict(serialized)
        message = (
            "Timetable class "
            "'tests.test_utils.timetables.CustomSerializationTimetable' "
            "is not registered"
        )
        assert str(ctx.value) == message
    @pytest.mark.parametrize(
        "serialized_schedule_interval, expected_timetable",
        [
            (None, NullTimetable()),
            ("@weekly", cron_timetable("0 0 * * 0")),
            ("@once", OnceTimetable()),
            (
                {"__type": "timedelta", "__var": 86400.0},
                delta_timetable(timedelta(days=1)),
            ),
        ],
    )
    def test_deserialization_schedule_interval(
        self,
        serialized_schedule_interval,
        expected_timetable,
    ):
        """Test DAGs serialized before 2.2 can be correctly deserialized."""
        # Pre-2.2 payloads carry "schedule_interval" instead of "timetable".
        serialized = {
            "__version": 1,
            "dag": {
                "default_args": {"__type": "dict", "__var": {}},
                "_dag_id": "simple_dag",
                "fileloc": __file__,
                "tasks": [],
                "timezone": "UTC",
                "schedule_interval": serialized_schedule_interval,
            },
        }

        SerializedDAG.validate_schema(serialized)
        dag = SerializedDAG.from_dict(serialized)
        assert dag.timetable == expected_timetable
    @pytest.mark.parametrize(
        "val, expected",
        [
            (relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
            (relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
            # Every friday
            (relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
            # Every second friday
            (relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
        ],
    )
    def test_roundtrip_relativedelta(self, val, expected):
        # relativedelta must serialize to the expected dict and round-trip back.
        serialized = SerializedDAG._serialize(val)
        assert serialized == expected
        round_tripped = SerializedDAG._deserialize(serialized)
        assert val == round_tripped
    @pytest.mark.parametrize(
        "val, expected_val",
        [
            (None, {}),
            ({"param_1": "value_1"}, {"param_1": "value_1"}),
            ({"param_1": {1, 2, 3}}, {"param_1": {1, 2, 3}}),
        ],
    )
    def test_dag_params_roundtrip(self, val, expected_val):
        """
        Test that params work both on Serialized DAGs & Tasks
        """
        dag = DAG(dag_id='simple_dag', params=val)
        BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

        serialized_dag_json = SerializedDAG.to_json(dag)

        serialized_dag = json.loads(serialized_dag_json)

        # DAG-level params are always serialized (possibly empty).
        assert "params" in serialized_dag["dag"]

        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
        assert expected_val == deserialized_dag.params.dump()
        assert expected_val == deserialized_simple_task.params.dump()
    def test_invalid_params(self):
        """
        Test to make sure that only native Param objects are being passed as dag or task params
        """

        class S3Param(Param):
            def __init__(self, path: str):
                schema = {"type": "string", "pattern": r"s3:\/\/(.+?)\/(.+)"}
                super().__init__(default=path, schema=schema)

        dag = DAG(dag_id='simple_dag', params={'path': S3Param('s3://my_bucket/my_path')})

        with pytest.raises(SerializationError):
            SerializedDAG.to_dict(dag)

        # NOTE(review): no assertion follows this construction — presumably it
        # only needs to not crash at definition time. TODO confirm intent.
        dag = DAG(dag_id='simple_dag')
        BaseOperator(
            task_id='simple_task',
            dag=dag,
            start_date=datetime(2019, 8, 1),
            params={'path': S3Param('s3://my_bucket/my_path')},
        )
    @pytest.mark.parametrize(
        'param',
        [
            Param('my value', description='hello', schema={'type': 'string'}),
            Param('my value', description='hello'),
            Param(None, description=None),
        ],
    )
    def test_full_param_roundtrip(self, param):
        """
        Test to make sure that only native Param objects are being passed as dag or task params
        """

        dag = DAG(dag_id='simple_dag', params={'my_param': param})
        serialized_json = SerializedDAG.to_json(dag)
        serialized = json.loads(serialized_json)
        SerializedDAG.validate_schema(serialized)
        dag = SerializedDAG.from_dict(serialized)

        assert dag.params["my_param"] == param.value
        # Value, description and schema must all survive the round trip.
        observed_param = dag.params.get_param('my_param')
        assert isinstance(observed_param, Param)
        assert observed_param.description == param.description
        assert observed_param.schema == param.schema
    @pytest.mark.parametrize(
        "val, expected_val",
        [
            (None, {}),
            ({"param_1": "value_1"}, {"param_1": "value_1"}),
            ({"param_1": {1, 2, 3}}, {"param_1": {1, 2, 3}}),
        ],
    )
    def test_task_params_roundtrip(self, val, expected_val):
        """
        Test that params work both on Serialized DAGs & Tasks
        """
        dag = DAG(dag_id='simple_dag')
        BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))

        serialized_dag = SerializedDAG.to_dict(dag)
        # Unlike DAG-level params, task-level params are omitted when empty.
        if val:
            assert "params" in serialized_dag["dag"]["tasks"][0]
        else:
            assert "params" not in serialized_dag["dag"]["tasks"][0]

        deserialized_dag = SerializedDAG.from_dict(serialized_dag)
        deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
        assert expected_val == deserialized_simple_task.params.dump()
    def test_extra_serialized_field_and_operator_links(self):
        """
        Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.

        This tests also depends on GoogleLink() registered as a plugin
        in tests/plugins/test_plugin.py

        The function tests that if extra operator links are registered in plugin
        in ``operator_extra_links`` and the same is also defined in
        the Operator in ``BaseOperator.operator_extra_links``, it has the correct
        extra link.
        """
        test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
        dag = DAG(dag_id='simple_dag', start_date=test_date)
        CustomOperator(task_id='simple_task', dag=dag, bash_command="true")

        serialized_dag = SerializedDAG.to_dict(dag)
        # bash_command is an extra serialized field declared by CustomOperator.
        assert "bash_command" in serialized_dag["dag"]["tasks"][0]

        dag = SerializedDAG.from_dict(serialized_dag)
        simple_task = dag.task_dict["simple_task"]
        assert getattr(simple_task, "bash_command") == "true"

        #########################################################
        # Verify Operator Links work with Serialized Operator
        #########################################################
        # Check Serialized version of operator link only contains the inbuilt Op Link
        assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
            {'tests.test_utils.mock_operators.CustomOpLink': {}}
        ]

        # Test all the extra_links are set
        assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}

        XCom.set(
            key='search_query',
            value="dummy_value_1",
            task_id=simple_task.task_id,
            dag_id=simple_task.dag_id,
            execution_date=test_date,
        )

        # Test Deserialized inbuilt link
        custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
        assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link

        # Test Deserialized link registered via Airflow Plugin
        google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
        assert "https://www.google.com" == google_link_from_plugin
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self, caplog):
    """
    Assert OperatorLinks not registered via Plugins and if it is not an inbuilt Operator Link,
    it can still deserialize the DAG (does not error) but just logs an error
    """

    class TaskStateLink(BaseOperatorLink):
        """OperatorLink not registered via Plugins nor a built-in OperatorLink"""

        name = 'My Link'

        def get_link(self, operator, dttm):
            return 'https://www.google.com'

    class MyOperator(BaseOperator):
        """Just a DummyOperator using above defined Extra Operator Link"""

        operator_extra_links = [TaskStateLink()]

        def execute(self, context: Context):
            pass

    with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
        MyOperator(task_id='blah')

    serialized_dag = SerializedDAG.to_dict(dag)

    # Deserializing a DAG whose operator carries an unregistered link must not
    # raise — it should only log an error mentioning the link's dotted path.
    with caplog.at_level("ERROR", logger="airflow.serialization.serialized_objects"):
        SerializedDAG.from_dict(serialized_dag)

    expected_err_msg = (
        "Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' not registered"
    )
    assert expected_err_msg in caplog.text
def test_extra_serialized_field_and_multiple_operator_links(self):
    """
    Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.

    This tests also depends on GoogleLink() registered as a plugin
    in tests/plugins/test_plugin.py

    The function tests that if extra operator links are registered in plugin
    in ``operator_extra_links`` and the same is also defined in
    the Operator in ``BaseOperator.operator_extra_links``, it has the correct
    extra link.
    """
    test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
    dag = DAG(dag_id='simple_dag', start_date=test_date)
    CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])

    serialized_dag = SerializedDAG.to_dict(dag)
    assert "bash_command" in serialized_dag["dag"]["tasks"][0]

    dag = SerializedDAG.from_dict(serialized_dag)
    simple_task = dag.task_dict["simple_task"]
    assert getattr(simple_task, "bash_command") == ["echo", "true"]

    #########################################################
    # Verify Operator Links work with Serialized Operator
    #########################################################
    # Check Serialized version of operator link only contains the inbuilt Op Link
    # (a list-valued bash_command yields one indexed link per element).
    assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
        {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
        {'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
    ]

    # Test all the extra_links are set
    assert set(simple_task.extra_links) == {
        'BigQuery Console #1',
        'BigQuery Console #2',
        'airflow',
        'github',
        'google',
    }

    # Each indexed link reads its query value from this XCom list, so it must
    # be set before get_extra_links() is called below.
    XCom.set(
        key='search_query',
        value=["dummy_value_1", "dummy_value_2"],
        task_id=simple_task.task_id,
        dag_id=simple_task.dag_id,
        execution_date=test_date,
    )

    # Test Deserialized inbuilt link #1
    custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
    assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link

    # Test Deserialized inbuilt link #2
    custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
    assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link

    # Test Deserialized link registered via Airflow Plugin
    google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
    assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
    """
    Class for testing purpose: allows to create objects with custom attributes in one single statement.

    Example: ``ClassWithCustomAttributes(foo="bar", template_fields=["foo"])`` yields an
    object with attributes ``foo`` and ``template_fields``.
    """

    def __init__(self, **kwargs):
        # Every keyword argument becomes an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __str__(self):
        return f"{self.__class__.__name__}({str(self.__dict__)})"

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        # BUGFIX: comparing against an object without a __dict__ (e.g. an int
        # or str) used to raise AttributeError. Returning NotImplemented lets
        # Python fall back to its default (identity-based) comparison.
        if not hasattr(other, "__dict__"):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
@pytest.mark.parametrize(
    "templated_field, expected_field",
    [
        (None, None),
        ([], []),
        ({}, {}),
        ("{{ task.task_id }}", "{{ task.task_id }}"),
        # BUGFIX: this entry was previously a bare two-element list, which
        # pytest unpacked into (templated_field, expected_field) — silently
        # duplicating the plain-string case above and never exercising a
        # list-valued templated field. A proper (input, expected) pair fixes it.
        (["{{ task.task_id }}", "{{ task.task_id }}"], ["{{ task.task_id }}", "{{ task.task_id }}"]),
        ({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
        ({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
        (
            [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
            [{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
        ),
        (
            {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
            {"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
        ),
        (
            ClassWithCustomAttributes(
                att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
            ),
            "ClassWithCustomAttributes("
            "{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
        ),
        (
            ClassWithCustomAttributes(
                nested1=ClassWithCustomAttributes(
                    att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
                ),
                nested2=ClassWithCustomAttributes(
                    att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
                ),
                template_fields=["nested1"],
            ),
            "ClassWithCustomAttributes("
            "{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
            "'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
            "'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
            "'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
        ),
    ],
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
    """
    Test that templated_fields exists for all Operators in Serialized DAG.

    Since we don't want to inflate arbitrary python objects (it poses a RCE/security risk etc.)
    we want check that non-"basic" objects are turned in to strings after deserializing.
    """
    dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
    with dag:
        BashOperator(task_id="test", bash_command=templated_field)

    serialized_dag = SerializedDAG.to_dict(dag)
    deserialized_dag = SerializedDAG.from_dict(serialized_dag)
    deserialized_test_task = deserialized_dag.task_dict["test"]
    # Basic JSON-compatible values round-trip as-is; arbitrary objects are
    # stringified (via str()) during serialization.
    assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
    """
    Additional Properties are disabled on DAGs. This test verifies that all the
    keys in DAG.get_serialized_fields are listed in Schema definition.
    """
    schema_properties = load_dag_schema_dict()["definitions"]["dag"]["properties"]

    # Keys injected manually during serialization do not correspond to DAG fields.
    manually_added_keys = {
        "is_subdag",
        "tasks",
        "has_on_success_callback",
        "has_on_failure_callback",
        "dag_dependencies",
        "params",
    }
    # Keys kept in the schema only for backwards compatibility.
    legacy_keys = {"_concurrency"}

    expected_fields = set(schema_properties) - manually_added_keys - legacy_keys
    assert set(DAG.get_serialized_fields()) == expected_fields
def test_operator_subclass_changing_base_defaults(self):
    # Sanity check: the base default must be True, otherwise the subclass
    # override below would not prove anything.
    assert (
        BaseOperator(task_id='dummy').do_xcom_push is True
    ), "Precondition check! If this fails the test won't make sense"

    class MyOperator(BaseOperator):
        # Subclass flips the default of an inherited serialized field.
        def __init__(self, do_xcom_push=False, **kwargs):
            super().__init__(**kwargs)
            self.do_xcom_push = do_xcom_push

    op = MyOperator(task_id='dummy')
    assert op.do_xcom_push is False

    # The subclass default (False) must survive the serde round-trip and not
    # be replaced by BaseOperator's default (True).
    blob = SerializedBaseOperator.serialize_operator(op)
    serialized_op = SerializedBaseOperator.deserialize_operator(blob)

    assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
    """
    This test verifies that there are no new fields added to BaseOperator. And reminds that
    tests should be added for it.
    """
    base_operator = BaseOperator(task_id="10")
    # Only compare the attributes that actually take part in serialization.
    fields = {k: v for (k, v) in vars(base_operator).items() if k in BaseOperator.get_serialized_fields()}
    # NOTE: this dict is the exhaustive expected default state of a fresh
    # BaseOperator — any new serialized field must be added here AND to
    # airflow/serialization/schema.json (see the failure message below).
    assert fields == {
        '_inlets': [],
        '_log': base_operator.log,
        '_outlets': [],
        '_pre_execute_hook': None,
        '_post_execute_hook': None,
        'depends_on_past': False,
        'downstream_task_ids': set(),
        'do_xcom_push': True,
        'doc': None,
        'doc_json': None,
        'doc_md': None,
        'doc_rst': None,
        'doc_yaml': None,
        'email': None,
        'email_on_failure': True,
        'email_on_retry': True,
        'execution_timeout': None,
        'executor_config': {},
        'max_active_tis_per_dag': None,
        'max_retry_delay': None,
        'on_execute_callback': None,
        'on_failure_callback': None,
        'on_retry_callback': None,
        'on_success_callback': None,
        'owner': 'airflow',
        'params': {},
        'pool': 'default_pool',
        'pool_slots': 1,
        'priority_weight': 1,
        'queue': 'default',
        'resources': None,
        'retries': 0,
        'retry_delay': timedelta(0, 300),
        'retry_exponential_backoff': False,
        'run_as_user': None,
        'sla': None,
        'task_id': '10',
        'trigger_rule': 'all_success',
        'wait_for_downstream': False,
        'weight_rule': 'downstream',
    }, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_operator_deserialize_old_names(self):
    # "_downstream_task_ids" is the legacy (underscore-prefixed) field name;
    # deserialization must still accept it and map it onto the current
    # "downstream_task_ids" attribute.
    blob = {
        "task_id": "custom_task",
        "_downstream_task_ids": ['foo'],
        "template_ext": [],
        "template_fields": ['bash_command'],
        "template_fields_renderers": {},
        "_task_type": "CustomOperator",
        "_task_module": "tests.test_utils.mock_operators",
        "pool": "default_pool",
        "ui_color": "#fff",
        "ui_fgcolor": "#000",
    }

    # The legacy blob must still validate against the operator JSON schema.
    SerializedDAG._json_schema.validate(blob, _schema=load_dag_schema_dict()['definitions']['operator'])
    serialized_op = SerializedBaseOperator.deserialize_operator(blob)
    assert serialized_op.downstream_task_ids == {'foo'}
def test_task_group_serialization(self):
    """
    Test TaskGroup serialization/deserialization.
    """
    from airflow.operators.dummy import DummyOperator

    execution_date = datetime(2020, 1, 1)
    with DAG("test_task_group_serialization", start_date=execution_date) as dag:
        task1 = DummyOperator(task_id="task1")
        with TaskGroup("group234") as group234:
            _ = DummyOperator(task_id="task2")
            with TaskGroup("group34") as group34:
                _ = DummyOperator(task_id="task3")
                _ = DummyOperator(task_id="task4")
        task5 = DummyOperator(task_id="task5")
        task1 >> group234
        group34 >> task5

    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)
    json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(json_dag, dag)

    serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))

    assert serialized_dag.task_group.children
    assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()

    def check_task_group(node):
        # Recursively walk the TaskGroup tree; leaves (operators) have no
        # ``children`` attribute and fall into the except branch.
        try:
            children = node.children.values()
        except AttributeError:
            # Round-trip serialization and check the result
            expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
            expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
            expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
            assert node
            assert SerializedBaseOperator.serialize_operator(node) == expected_dict
            return

        for child in children:
            check_task_group(child)

    check_task_group(serialized_dag.task_group)
def test_deps_sorted(self):
    """
    Tests serialize_operator, make sure the deps is in order
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.sensors.external_task import ExternalTaskSensor

    execution_date = datetime(2020, 1, 1)
    with DAG(dag_id="test_deps_sorted", start_date=execution_date) as dag:
        # A reschedule-mode sensor carries a non-default dep set, so "deps"
        # appears in its serialized form.
        task1 = ExternalTaskSensor(
            task_id="task1",
            external_dag_id="external_dag_id",
            mode="reschedule",
        )
        task2 = DummyOperator(task_id="task2")
        task1 >> task2

    serialize_op = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])

    deps = serialize_op["deps"]
    # Dep class paths must be emitted in sorted order for deterministic output.
    assert deps == [
        'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
        'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
        'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
        'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
        'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
    ]
def test_task_group_sorted(self):
    """
    Tests serialize_task_group, make sure the list is in order
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.serialization.serialized_objects import SerializedTaskGroup

    # Inline diagram of the DAG shape under test (bare string, no-op at runtime):
    """
    start
    ╱ ╲
    ╱ ╲
    task_group_up1 task_group_up2
    (task_up1) (task_up2)
    ╲ ╱
    task_group_middle
    (task_middle)
    ╱ ╲
    task_group_down1 task_group_down2
    (task_down1) (task_down2)
    ╲ ╱
    ╲ ╱
    end
    """
    execution_date = datetime(2020, 1, 1)
    with DAG(dag_id="test_task_group_sorted", start_date=execution_date) as dag:
        start = DummyOperator(task_id="start")
        with TaskGroup("task_group_up1") as task_group_up1:
            _ = DummyOperator(task_id="task_up1")
        with TaskGroup("task_group_up2") as task_group_up2:
            _ = DummyOperator(task_id="task_up2")
        with TaskGroup("task_group_middle") as task_group_middle:
            _ = DummyOperator(task_id="task_middle")
        with TaskGroup("task_group_down1") as task_group_down1:
            _ = DummyOperator(task_id="task_down1")
        with TaskGroup("task_group_down2") as task_group_down2:
            _ = DummyOperator(task_id="task_down2")
        end = DummyOperator(task_id='end')

        start >> task_group_up1
        start >> task_group_up2
        task_group_up1 >> task_group_middle
        task_group_up2 >> task_group_middle
        task_group_middle >> task_group_down1
        task_group_middle >> task_group_down2
        task_group_down1 >> end
        task_group_down2 >> end

    task_group_middle_dict = SerializedTaskGroup.serialize_task_group(
        dag.task_group.children["task_group_middle"]
    )
    # All upstream/downstream id lists must be serialized in sorted order so
    # the output is deterministic across runs.
    upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
    assert upstream_group_ids == ['task_group_up1', 'task_group_up2']

    upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
    assert upstream_task_ids == ['task_group_up1.task_up1', 'task_group_up2.task_up2']

    downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
    assert downstream_group_ids == ['task_group_down1', 'task_group_down2']

    task_group_down1_dict = SerializedTaskGroup.serialize_task_group(
        dag.task_group.children["task_group_down1"]
    )
    downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
    assert downstream_task_ids == ['end']
def test_edge_info_serialization(self):
    """
    Tests edge_info serialization/deserialization.
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.utils.edgemodifier import Label

    with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
        upstream = DummyOperator(task_id="task1")
        downstream = DummyOperator(task_id="task2")
        upstream >> Label("test label") >> downstream

    # Schema validation on the dict form.
    dag_dict = SerializedDAG.to_dict(dag)
    SerializedDAG.validate_schema(dag_dict)

    # Full JSON round-trip must preserve the DAG structure.
    roundtripped = SerializedDAG.from_json(SerializedDAG.to_json(dag))
    self.validate_deserialized_dag(roundtripped, dag)

    # The Label must survive serialize/deserialize via edge_info.
    restored = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
    assert restored.edge_info == dag.edge_info
@pytest.mark.parametrize(
    "mode, expect_custom_deps",
    [
        ("poke", False),
        ("reschedule", True),
    ],
)
def test_serialize_sensor(self, mode, expect_custom_deps):
    from airflow.sensors.base import BaseSensorOperator

    class DummySensor(BaseSensorOperator):
        def poke(self, context: Context):
            return False

    op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)

    blob = SerializedBaseOperator.serialize_operator(op)

    # Only "reschedule" mode adds a non-default dep (ReadyToRescheduleDep),
    # so only then should the "deps" key appear in the serialized blob.
    if expect_custom_deps:
        assert "deps" in blob
    else:
        assert "deps" not in blob

    serialized_op = SerializedBaseOperator.deserialize_operator(blob)
    assert op.deps == serialized_op.deps
@pytest.mark.parametrize(
    "passed_success_callback, expected_value",
    [
        ({"on_success_callback": lambda x: print("hi")}, True),
        ({}, False),
    ],
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
    """
    Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
    in Serialized JSON blob. And when it is de-serialized dag.has_on_success_callback is set to True.

    When the callback is not set, has_on_success_callback should not be stored in Serialized blob
    and so default to False on de-serialization
    """
    dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

    serialized_dag = SerializedDAG.to_dict(dag)
    # The callable itself is never serialized — only a boolean marker is, and
    # only when a callback was actually set.
    if expected_value:
        assert "has_on_success_callback" in serialized_dag["dag"]
    else:
        assert "has_on_success_callback" not in serialized_dag["dag"]

    deserialized_dag = SerializedDAG.from_dict(serialized_dag)

    assert deserialized_dag.has_on_success_callback is expected_value
@pytest.mark.parametrize(
    "passed_failure_callback, expected_value",
    [
        ({"on_failure_callback": lambda x: print("hi")}, True),
        ({}, False),
    ],
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
    """
    Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
    in Serialized JSON blob. And when it is de-serialized dag.has_on_failure_callback is set to True.

    When the callback is not set, has_on_failure_callback should not be stored in Serialized blob
    and so default to False on de-serialization
    """
    dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
    BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))

    blob = SerializedDAG.to_dict(dag)
    # The callable itself is never serialized; only a boolean marker is, and
    # only when a callback was actually passed.
    assert ("has_on_failure_callback" in blob["dag"]) == expected_value

    restored_dag = SerializedDAG.from_dict(blob)
    assert restored_dag.has_on_failure_callback is expected_value
@pytest.mark.parametrize(
    "object_to_serialized, expected_output",
    [
        (
            ['task_1', 'task_5', 'task_2', 'task_4'],
            ['task_1', 'task_5', 'task_2', 'task_4'],
        ),
        (
            {'task_1', 'task_5', 'task_2', 'task_4'},
            ['task_1', 'task_2', 'task_4', 'task_5'],
        ),
        (
            ('task_1', 'task_5', 'task_2', 'task_4'),
            ['task_1', 'task_5', 'task_2', 'task_4'],
        ),
        (
            {
                "staging_schema": [
                    {"key:": "foo", "value": "bar"},
                    {"key:": "this", "value": "that"},
                    "test_conf",
                ]
            },
            {
                "staging_schema": [
                    {"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
                    {
                        "__type": "dict",
                        "__var": {"key:": "this", "value": "that"},
                    },
                    "test_conf",
                ]
            },
        ),
        (
            {"task3": "test3", "task2": "test2", "task1": "test1"},
            {"task1": "test1", "task2": "test2", "task3": "test3"},
        ),
        (
            ('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
            ['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
        ),
    ],
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
    """Test Serialized Sets are sorted while list and tuple preserve order"""
    serialized_obj = SerializedDAG._serialize(object_to_serialized)
    # Encoded containers are wrapped as {"__type": ..., "__var": ...};
    # unwrap before comparing against the plain expected value.
    if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
        serialized_obj = serialized_obj["__var"]
    assert serialized_obj == expected_output
def test_params_upgrade(self):
    """when pre-2.2.0 param (i.e. primitive) is deserialized we convert to Param"""
    serialized = {
        "__version": 1,
        "dag": {
            "_dag_id": "simple_dag",
            "fileloc": '/path/to/file.py',
            "tasks": [],
            "timezone": "UTC",
            # Pre-2.2.0 format: bare primitives rather than Param payloads.
            "params": {"none": None, "str": "str", "dict": {"a": "b"}},
        },
    }
    dag = SerializedDAG.from_dict(serialized)

    # Primitives are wrapped in Param objects on deserialization while still
    # resolving to their original values.
    assert dag.params["none"] is None
    assert isinstance(dag.params.get_param("none"), Param)
    assert dag.params["str"] == "str"
def test_params_serialize_default_2_2_0(self):
    """In 2.0.0, param ``default`` was assumed to be json-serializable objects and were not run though
    the standard serializer function. In 2.2.2 we serialize param ``default``. We keep this
    test only to ensure that params stored in 2.2.0 can still be parsed correctly."""
    serialized = {
        "__version": 1,
        "dag": {
            "_dag_id": "simple_dag",
            "fileloc": '/path/to/file.py',
            "tasks": [],
            "timezone": "UTC",
            # 2.2.0-era payload: "default" holds the raw (unserialized) value.
            "params": {"str": {"__class": "airflow.models.param.Param", "default": "str"}},
        },
    }
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)

    assert isinstance(dag.params.get_param("str"), Param)
    assert dag.params["str"] == "str"
def test_params_serialize_default(self):
    # Current (2.2.2+) format: default, description and schema are all
    # carried through the serializer and must round-trip into a Param.
    serialized = {
        "__version": 1,
        "dag": {
            "_dag_id": "simple_dag",
            "fileloc": '/path/to/file.py',
            "tasks": [],
            "timezone": "UTC",
            "params": {
                "my_param": {
                    "default": "a string value",
                    "description": "hello",
                    "schema": {"__var": {"type": "string"}, "__type": "dict"},
                    "__class": "airflow.models.param.Param",
                }
            },
        },
    }
    SerializedDAG.validate_schema(serialized)
    dag = SerializedDAG.from_dict(serialized)

    assert dag.params["my_param"] == "a string value"
    param = dag.params.get_param('my_param')
    assert isinstance(param, Param)
    assert param.description == 'hello'
    assert param.schema == {'type': 'string'}
def test_kubernetes_optional():
    """Serialisation / deserialisation continues to work without kubernetes installed"""

    def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
        # Simulate a missing kubernetes package for absolute (level == 0)
        # imports only; relative imports are passed through untouched.
        if level == 0 and name.partition('.')[0] == 'kubernetes':
            raise ImportError("No module named 'kubernetes'")
        return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)

    with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
        # load module from scratch, this does not replace any already imported
        # airflow.serialization.serialized_objects module in sys.modules
        spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        # if we got this far, the module did not try to load kubernetes, but
        # did it try to access airflow.kubernetes.*?
        imported_airflow = {
            c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
        }
        assert "kubernetes" not in imported_airflow

        # pod loading is not supported when kubernetes is not available
        pod_override = {
            '__type': 'k8s.V1Pod',
            '__var': PodGenerator.serialize_pod(executor_config_pod),
        }

        with pytest.raises(RuntimeError):
            module.BaseSerialization.from_dict(pod_override)

        # basic serialization should succeed
        module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
def test_mapped_operator_serde():
    # Mapped operator built from partial() + map(); the mapped kwargs contain
    # a mix of primitives and a dict that needs encoding.
    real_op = BashOperator.partial(task_id='a').map(bash_command=[1, 2, {'a': 'b'}])

    serialized = SerializedBaseOperator._serialize(real_op)

    assert serialized == {
        '_is_dummy': False,
        '_is_mapped': True,
        '_task_module': 'airflow.operators.bash',
        '_task_type': 'BashOperator',
        'downstream_task_ids': [],
        'mapped_kwargs': {
            'bash_command': [
                1,
                2,
                {"__type": "dict", "__var": {'a': 'b'}},
            ]
        },
        'partial_kwargs': {},
        'task_id': 'a',
        'template_fields': ['bash_command', 'env'],
    }

    op = SerializedBaseOperator.deserialize_operator(serialized)
    # Mapped operators deserialize lazily: operator_class stays a dotted-path
    # string rather than the imported class object.
    assert op.operator_class == "airflow.operators.bash.BashOperator"
def test_mapped_task_group_serde():
    execution_date = datetime(2020, 1, 1)
    literal = [1, 2, {'a': 'b'}]

    with DAG("test", start_date=execution_date) as dag:
        with TaskGroup("process_one", dag=dag).map(literal) as process_one:
            BaseOperator(task_id='one')

    serialized = SerializedTaskGroup.serialize_task_group(process_one)

    assert serialized == {
        '_group_id': 'process_one',
        'children': {'process_one.one': ('operator', 'process_one.one')},
        'downstream_group_ids': [],
        'downstream_task_ids': [],
        'prefix_group_id': True,
        'tooltip': '',
        'ui_color': 'CornflowerBlue',
        'ui_fgcolor': '#000',
        'upstream_group_ids': [],
        'upstream_task_ids': [],
        # The literal passed to .map() is serialized under 'mapped_arg'.
        'mapped_arg': [
            1,
            2,
            {"__type": "dict", "__var": {'a': 'b'}},
        ],
    }

    with DAG("test", start_date=execution_date):
        # Deserialization must accept the mapped-group payload without error.
        SerializedTaskGroup.deserialize_task_group(serialized, None, dag.task_dict)
| 39.236987 | 110 | 0.59063 |
acde98f9fefe3e1a3230c9a59e0880600edf77bb | 400 | py | Python | ee/clickhouse/sql/funnels/funnel.py | jacobherrington/posthog | 8a1bd1c80e9313792fa9fb3576ca9581995dc744 | [
"MIT"
] | null | null | null | ee/clickhouse/sql/funnels/funnel.py | jacobherrington/posthog | 8a1bd1c80e9313792fa9fb3576ca9581995dc744 | [
"MIT"
] | null | null | null | ee/clickhouse/sql/funnels/funnel.py | jacobherrington/posthog | 8a1bd1c80e9313792fa9fb3576ca9581995dc744 | [
"MIT"
] | null | null | null | FUNNEL_PERSONS_BY_STEP_SQL = """
SELECT person_id
FROM (
{steps_per_person_query}
)
WHERE {persons_steps}
ORDER BY person_id
LIMIT 100
OFFSET {offset}
SETTINGS allow_experimental_window_functions = 1
"""
FUNNEL_INNER_EVENT_STEPS_QUERY = """
SELECT
person_id,
timestamp,
{steps}
{select_prop}
FROM (
{event_query}
) events
{extra_join}
WHERE (
{steps_condition}
)
{extra_conditions}
"""
| 14.285714 | 48 | 0.755 |
acde99e04299def70d553d800443468640a0ae83 | 14,990 | py | Python | backend/galaxy/models.py | linea-it/manga | e3b3a060cad55ca8549b6f874a959aab5096f226 | [
"MIT"
] | null | null | null | backend/galaxy/models.py | linea-it/manga | e3b3a060cad55ca8549b6f874a959aab5096f226 | [
"MIT"
] | 8 | 2020-02-12T17:19:20.000Z | 2021-09-22T19:37:27.000Z | backend/galaxy/models.py | linea-it/manga | e3b3a060cad55ca8549b6f874a959aab5096f226 | [
"MIT"
] | null | null | null | from django.db import models
from django.conf import settings
class Image(models.Model):
megacube = models.CharField(
verbose_name='Megacube File Name',
help_text='The name of the megacube\'s file',
max_length=150,
unique=True
)
plate = models.BigIntegerField(
verbose_name='Plate ID',
help_text=' Plate ID',
null=True,
blank=True
)
ifudsgn = models.CharField(
verbose_name='IFU Design ID',
help_text='IFU Design ID (e.g. 12701)',
max_length=100,
null=True,
blank=True
)
plateifu = models.CharField(
verbose_name='Plate IFU Design ID',
help_text='Plate+ifudesign name for this object (e.g. 7443-12701)',
max_length=100,
unique=True
)
mangaid = models.CharField(
verbose_name='MaNGA ID',
help_text='MaNGA ID for this object (e.g. 1-114145)',
max_length=100,
unique=True
)
versdrp2 = models.CharField(
verbose_name='DRP 2 Version',
help_text=' Version of mangadrp used for 2d reductions',
max_length=100,
null=True,
blank=True
)
versdrp3 = models.CharField(
verbose_name='DRP 3 Version',
help_text='Version of mangadrp used for 3d reductions',
max_length=100,
null=True,
blank=True
)
verscore = models.CharField(
verbose_name='Mangacore Version',
help_text='Version of mangacore used for reductions',
max_length=100,
null=True,
blank=True
)
versutil = models.CharField(
verbose_name='Idutils Version',
help_text='Version of idlutils used for reductions',
max_length=100,
null=True,
blank=True
)
versprim = models.CharField(
verbose_name='MaNGA ID',
help_text='Version of mangapreim used for reductions',
max_length=100,
null=True,
blank=True
)
platetyp = models.CharField(
verbose_name='MaNGA ID',
help_text=' Plate type (e.g. MANGA, APOGEE-2&MANGA)',
max_length=100,
null=True,
blank=True
)
srvymode = models.CharField(
verbose_name='Survey Mode',
help_text='Survey mode (e.g. MANGA dither, MANGA stare, APOGEE lead)',
max_length=100,
null=True,
blank=True
)
objra = models.FloatField(
verbose_name='Object Right Ascension',
help_text='Right ascension of the science object in J2000 (degrees)',
null=True,
blank=True
)
objdec = models.FloatField(
verbose_name='Object Declination',
help_text='Declination of the science object in J2000 (degrees)',
null=True,
blank=True
)
ifuglon = models.FloatField(
verbose_name='IFU RA/DEC Galactic Longitude',
help_text='Galactic longitude corresponding to IFURA/DEC (degrees)',
null=True,
blank=True
)
ifuglat = models.FloatField(
verbose_name='IFU RA/DEC Galactic Latitude',
help_text='Galactic latitude corresponding to IFURA/DEC (degrees)',
null=True,
blank=True
)
ifura = models.FloatField(
verbose_name='IFU Right Ascension',
help_text='Right ascension of this IFU in J2000 (degrees)',
null=True,
blank=True
)
ifudec = models.FloatField(
verbose_name='IFU Declination',
help_text='Declination of this IFU in J2000 (degrees)',
null=True,
blank=True
)
ebvgal = models.FloatField(
verbose_name='Galactic E(B-V)',
help_text='E(B-V) value from SDSS dust routine for this IFUGLON, IFUGLAT',
null=True,
blank=True
)
nexp = models.BigIntegerField(
verbose_name='Exposures',
help_text='Number of science exposures combined',
null=True,
blank=True
)
exptime = models.FloatField(
verbose_name='Exposures Time',
help_text='Total exposure time (seconds)',
null=True,
blank=True
)
drp3qual = models.BigIntegerField(
verbose_name='Quality Bitmask',
help_text='Quality bitmask',
null=True,
blank=True
)
bluesn2 = models.FloatField(
verbose_name='Blue (S/N)²',
help_text='Total blue (S/N)^2 across all nexp exposures',
null=True,
blank=True
)
redsn2 = models.FloatField(
verbose_name='Red (S/N)²',
help_text='Total red (S/N)^2 across all nexp exposures',
null=True,
blank=True
)
harname = models.CharField(
verbose_name='Harness Name',
help_text='IFU harness name',
max_length=100,
null=True,
blank=True
)
frlplug = models.BigIntegerField(
verbose_name='Frplug',
help_text='Frplug hardware code',
null=True,
blank=True
)
cartid = models.CharField(
verbose_name='Cartridge ID',
help_text='Cartridge ID number',
max_length=100,
null=True,
blank=True
)
designid = models.BigIntegerField(
verbose_name='Design ID',
help_text='Design ID number',
null=True,
blank=True
)
cenra = models.FloatField(
verbose_name='Plate Center Right Ascension',
help_text='Plate center right ascension in J2000 (degrees)',
null=True,
blank=True
)
cendec = models.FloatField(
verbose_name='Plate Center Declination',
help_text='Plate center declination in J2000 (degrees)',
null=True,
blank=True
)
airmsmin = models.FloatField(
verbose_name='Minimum Airmass',
help_text='Minimum airmass across all exposures',
null=True,
blank=True
)
airmsmed = models.FloatField(
verbose_name='Median Airmass',
help_text='Median airmass across all exposures',
null=True,
blank=True
)
airmsmax = models.FloatField(
verbose_name='Maximum Airmass',
help_text='Maximum airmass across all exposures',
null=True,
blank=True
)
seemin = models.FloatField(
verbose_name='Best Guider Seeing',
help_text='Best guider seeing (arcsec)',
null=True,
blank=True
)
seemed = models.FloatField(
verbose_name='Median Guider Seeing',
help_text='Median guider seeing (arcsec)',
null=True,
blank=True
)
seemax = models.FloatField(
verbose_name='Worst Guider Seeing',
help_text='Worst guider seeing (arcsec)',
null=True,
blank=True
)
transmin = models.FloatField(
verbose_name='Worst Transparency',
help_text='Worst transparency',
null=True,
blank=True
)
transmed = models.FloatField(
verbose_name='Median Transparency',
help_text='Median transparency',
null=True,
blank=True
)
transmax = models.FloatField(
verbose_name='Best Transparency',
help_text='Best transparency',
null=True,
blank=True
)
mjdmin = models.BigIntegerField(
verbose_name='Minimum MJD',
help_text='Minimum MJD across all exposures',
null=True,
blank=True
)
mjdmed = models.BigIntegerField(
verbose_name='Median MJD',
help_text='Median MJD across all exposures',
null=True,
blank=True
)
mjdmax = models.BigIntegerField(
verbose_name='Maximum MJD',
help_text='Maximum MJD across all exposures',
null=True,
blank=True
)
gfwhm = models.FloatField(
verbose_name='FWHM g',
help_text='Reconstructed FWHM in g-band (arcsec)',
null=True,
blank=True
)
rfwhm = models.FloatField(
verbose_name='FWHM r',
help_text='Reconstructed FWHM in r-band (arcsec)',
null=True,
blank=True
)
ifwhm = models.FloatField(
verbose_name='FWHM i',
help_text='Reconstructed FWHM in i-band (arcsec)',
null=True,
blank=True
)
zfwhm = models.FloatField(
verbose_name='FWHM z',
help_text='Reconstructed FWHM in z-band (arcsec)',
null=True,
blank=True
)
mngtarg1 = models.BigIntegerField(
verbose_name='Manga-Target1',
help_text='Manga-target1 maskbit for galaxy target catalog',
null=True,
blank=True
)
mngtarg2 = models.BigIntegerField(
verbose_name='Manga-Target2',
help_text='Manga-target2 maskbit for galaxy target catalog',
null=True,
blank=True
)
mngtarg3 = models.BigIntegerField(
verbose_name='Manga-Target3',
help_text='Manga-target3 maskbit for galaxy target catalog',
null=True,
blank=True
)
catidnum = models.BigIntegerField(
verbose_name='Primary Target Input Catalog',
help_text='Primary target input catalog (leading digits of mangaid)',
null=True,
blank=True
)
plttarg = models.CharField(
verbose_name='Plate Target',
help_text='Plate target reference file appropriate for this target',
max_length=100,
null=True,
blank=True
)
manga_tileid = models.BigIntegerField(
verbose_name='Tile ID',
help_text='The ID of the tile to which this object has been allocated',
null=True,
blank=True
)
nsa_iauname = models.CharField(
verbose_name='IAU-Style',
help_text='IAU-style designation based on RA/Dec (NSA)',
max_length=19,
null=True,
blank=True
)
ifudesignsize = models.BigIntegerField(
verbose_name='Allocated IFU Size',
help_text='The allocated IFU size (0 = "unallocated")',
null=True,
blank=True
)
ifutargetsize = models.BigIntegerField(
verbose_name='Ideal IFU Size',
help_text='The ideal IFU size for this object. The intended IFU size is equal to IFUTargetSize except if IFUTargetSize > 127 when it is 127, or < 19 when it is 19',
null=True,
blank=True
)
ifudesignwrongsize = models.BigIntegerField(
verbose_name='Alternative Allocated IFU Size',
help_text='The allocated IFU size if the intended IFU size was not available',
null=True,
blank=True
)
nsa_field = models.BigIntegerField(
verbose_name='Field ID',
help_text='SDSS field ID covering the target',
null=True,
blank=True
)
nsa_run = models.BigIntegerField(
verbose_name='Run ID',
help_text='SDSS run ID covering the target',
null=True,
blank=True
)
nsa_version = models.CharField(
verbose_name='Version',
help_text='Version of NSA catalogue used to select these targets',
max_length=6,
null=True,
blank=True
)
nsa_nsaid = models.BigIntegerField(
verbose_name='',
help_text='',
null=True,
blank=True
)
nsa_z = models.FloatField(
verbose_name='Heliocentric Redshift',
help_text='Heliocentric redshift',
null=True,
blank=True
)
nsa_zdist = models.FloatField(
verbose_name='z Distance',
help_text='Distance estimate using peculiar velocity model of Willick et al. (1997); mulitply by c/Ho for Mpc',
null=True,
blank=True
)
# nsa_sersic_absmag = ''
# nsa_elpetro_absmag = ''
# nsa_elpetro_amivar = ''
nsa_sersic_mass = models.FloatField(
verbose_name='K-Correction Stellar Mass For Sersic Fluxes',
help_text='Stellar mass from K-correction fit (use with caution) for Sersic fluxes (Ωm=0.3, ΩΛ=0.7, h=1)',
null=True,
blank=True
)
nsa_elpetro_mass = models.FloatField(
verbose_name='K-Correction Stellar Mass For Elliptical Petrosian Fluxes',
help_text='Stellar mass from K-correction fit (use with caution) for elliptical Petrosian fluxes (Ωm=0.3, ΩΛ=0.7, h=1)',
null=True,
blank=True
)
nsa_elpetro_ba = models.FloatField(
verbose_name='Elliptical Apertures Axis Ratio',
help_text='Axis ratio used for elliptical apertures (for this version, same as ba90)',
null=True,
blank=True
)
nsa_elpetro_phi = models.FloatField(
verbose_name='Elliptical Apertures Position Angle',
help_text='Position angle (east of north) used for elliptical apertures (for this version, same as ba90) (degrees)',
null=True,
blank=True
)
# nsa_extinction = ''
nsa_petro_th50 = models.FloatField(
verbose_name='Petrosian 50% Light',
help_text='Azimuthally averaged SDSS-style Petrosian 50% light radius (derived from r band) (arcsec)',
null=True,
blank=True
)
# nsa_petro_flux = ''
# nsa_petro_flux_ivar = ''
# nsa_elpetro_flux = ''
# nsa_elpetro_flux_ivar = ''
nsa_sersic_ba = models.FloatField(
verbose_name='Sersic B/A',
help_text='Axis ratio b/a from two-dimensional, single-component Sersic fit in r-band',
null=True,
blank=True
)
nsa_sersic_n = models.FloatField(
verbose_name='Sersic Index',
help_text='Sersic index from two-dimensional, single-component Sersic fit in r-band',
null=True,
blank=True
)
nsa_sersic_phi = models.FloatField(
verbose_name='Sersic Angle (E of N)',
help_text='Angle (E of N) of major axis in two-dimensional, single-component Sersic fit in r-band (degrees)',
null=True,
blank=True
)
nsa_sersic_th50 = models.FloatField(
verbose_name='Sersic 50% Light',
help_text='50% light radius of two-dimensional, single-component Sersic fit to r-band (arcsec)',
null=True,
blank=True
)
# nsa_sersic_flux = ''
# nsa_sersic_flux_ivar= ''
def __str__(self):
return str(self.pk)
# # List of galaxies, created by the user:
# class List(models.Model):
# id_owner = models.ForeignKey(
# settings.AUTH_USER_MODEL,
# verbose_name='Owner',
# on_delete=models.CASCADE,
# )
# name = models.CharField(max_length=200)
# objects_qty = models.IntegerField(default=0)
# creation_date = models.DateTimeField('date published')
# def __str__(self):
# return str(self.pk)
# # Key-value association, many-to-many, between the galaxies and lists:
# class ListImage(models.Model):
# class Meta:
# db_table = 'galaxy_list_image'
# id_image = models.ForeignKey(
# Image,
# verbose_name='Image',
# on_delete=models.CASCADE,
# )
# id_list = models.ForeignKey(
# List,
# verbose_name='List',
# on_delete=models.CASCADE,
# )
| 30.591837 | 172 | 0.61074 |
acde9a1b8a7acafdb898b188e4f58a61678d2299 | 2,785 | py | Python | test/pyaz/storage/account/or_policy/rule/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | null | null | null | test/pyaz/storage/account/or_policy/rule/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | 9 | 2021-09-24T16:37:24.000Z | 2021-12-24T00:39:19.000Z | test/pyaz/storage/account/or_policy/rule/__init__.py | bigdatamoore/py-az-cli | 54383a4ee7cc77556f6183e74e992eec95b28e01 | [
"MIT"
] | null | null | null | import json, subprocess
from ..... pyaz_utils import get_cli_name, get_params
def show(account_name, policy_id, rule_id, resource_group=None):
    """Show the properties of the specified object-replication policy rule.

    Args:
        account_name: storage account name.
        policy_id: object replication policy id.
        rule_id: rule id within the policy.
        resource_group: optional resource group of the storage account.

    Returns:
        The parsed JSON emitted by the ``az`` CLI.

    Raises:
        Exception: with the CLI's stderr when no stdout was produced.

    Fix: the original signature placed the defaulted ``resource_group``
    before required parameters, which is a SyntaxError; it is now last, so
    keyword-argument callers are unaffected. Unreachable ``print`` calls
    after ``return``/``raise`` were removed.
    """
    params = get_params(locals())
    command = "az storage account or-policy rule show " + params
    print(command)
    # NOTE(review): shell=True with interpolated params — assumes get_params
    # quotes values safely; verify before passing untrusted input.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    raise Exception(stderr)
def list(account_name, policy_id, resource_group=None):
    """List the rules of the specified object-replication policy.

    Args:
        account_name: storage account name.
        policy_id: object replication policy id.
        resource_group: optional resource group of the storage account.

    Returns:
        The parsed JSON emitted by the ``az`` CLI.

    Raises:
        Exception: with the CLI's stderr when no stdout was produced.

    Fix: defaulted ``resource_group`` originally preceded required
    parameters (a SyntaxError); moved last so keyword callers still work.
    Dead ``print`` calls after ``return``/``raise`` removed.
    """
    params = get_params(locals())
    command = "az storage account or-policy rule list " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    raise Exception(stderr)
def add(account_name, policy_id, source_container, destination_container,
        resource_group=None, min_creation_time=None, prefix_match=None):
    """Add a rule to the specified object-replication policy.

    Args:
        account_name: storage account name.
        policy_id: object replication policy id.
        source_container: source container for the replication rule.
        destination_container: destination container for the rule.
        resource_group: optional resource group of the storage account.
        min_creation_time: optional minimum blob creation time filter.
        prefix_match: optional blob-name prefix filter.

    Returns:
        The parsed JSON emitted by the ``az`` CLI.

    Raises:
        Exception: with the CLI's stderr when no stdout was produced.

    Fix: defaulted ``resource_group`` originally preceded required
    parameters (a SyntaxError); moved after them so keyword callers still
    work. Dead ``print`` calls after ``return``/``raise`` removed.
    """
    params = get_params(locals())
    command = "az storage account or-policy rule add " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    raise Exception(stderr)
def update(account_name, policy_id, rule_id, resource_group=None,
           source_container=None, destination_container=None,
           min_creation_time=None, prefix_match=None):
    """Update the properties of an object-replication policy rule.

    Args:
        account_name: storage account name.
        policy_id: object replication policy id.
        rule_id: rule id within the policy.
        resource_group: optional resource group of the storage account.
        source_container: optional new source container.
        destination_container: optional new destination container.
        min_creation_time: optional minimum blob creation time filter.
        prefix_match: optional blob-name prefix filter.

    Returns:
        The parsed JSON emitted by the ``az`` CLI.

    Raises:
        Exception: with the CLI's stderr when no stdout was produced.

    Fix: defaulted ``resource_group`` originally preceded required
    parameters (a SyntaxError); moved after them so keyword callers still
    work. Dead ``print`` calls after ``return``/``raise`` removed.
    """
    params = get_params(locals())
    command = "az storage account or-policy rule update " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    raise Exception(stderr)
def remove(account_name, policy_id, rule_id, resource_group=None):
    """Remove the specified rule from an object-replication policy.

    Args:
        account_name: storage account name.
        policy_id: object replication policy id.
        rule_id: rule id within the policy.
        resource_group: optional resource group of the storage account.

    Returns:
        The parsed JSON emitted by the ``az`` CLI.

    Raises:
        Exception: with the CLI's stderr when no stdout was produced.

    Fix: defaulted ``resource_group`` originally preceded required
    parameters (a SyntaxError); moved last so keyword callers still work.
    Dead ``print`` calls after ``return``/``raise`` removed.
    """
    params = get_params(locals())
    command = "az storage account or-policy rule remove " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    raise Exception(stderr)
| 37.635135 | 160 | 0.682226 |
acde9a20dd81a75f56d79c145343516539794f44 | 3,379 | py | Python | kinow_client/models/currencies.py | kinow-io/kaemo-python-sdk | 610fce09e3a9e631babf09195b0492959d9e4d56 | [
"Apache-2.0"
] | 1 | 2017-05-03T12:48:22.000Z | 2017-05-03T12:48:22.000Z | kinow_client/models/currencies.py | kinow-io/kaemo-python-sdk | 610fce09e3a9e631babf09195b0492959d9e4d56 | [
"Apache-2.0"
] | null | null | null | kinow_client/models/currencies.py | kinow-io/kaemo-python-sdk | 610fce09e3a9e631babf09195b0492959d9e4d56 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Currencies(object):
    """Paginated collection of Currency resources.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, pagination=None, data=None):
        """Currencies - a model defined in Swagger.

        Args:
            pagination: a Pagination instance describing the page window.
            data: a list of Currency instances for the current page.
        """
        # Maps attribute name -> swagger type (used by to_dict serialization).
        self.swagger_types = {
            'pagination': 'Pagination',
            'data': 'list[Currency]'
        }
        # Maps attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'pagination': 'pagination',
            'data': 'data'
        }

        self._pagination = pagination
        self._data = data

    @property
    def pagination(self):
        """Gets the pagination of this Currencies.

        :return: The pagination of this Currencies.
        :rtype: Pagination
        """
        return self._pagination

    @pagination.setter
    def pagination(self, pagination):
        """Sets the pagination of this Currencies.

        :param pagination: The pagination of this Currencies.
        :type: Pagination
        """
        self._pagination = pagination

    @property
    def data(self):
        """Gets the data of this Currencies.

        :return: The data of this Currencies.
        :rtype: list[Currency]
        """
        return self._data

    @data.setter
    def data(self, data):
        """Sets the data of this Currencies.

        :param data: The data of this Currencies.
        :type: list[Currency]
        """
        self._data = data

    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, including inside lists and dict values.
        """
        result = {}

        # Fix: iterate with dict.items() instead of six.iteritems so this
        # method works without the third-party six dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal.

        Fix: the generated version accessed ``other.__dict__``
        unconditionally, raising AttributeError when compared against a
        non-Currencies operand (e.g. a string); now it returns False.
        """
        if not isinstance(other, Currencies):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
acde9a9c996466ddd5d54d8694a68215b8d255b2 | 1,192 | py | Python | elasticsearch_loader/iter.py | MosheZada/elasticsearch_loader | 46250f0e665185394b102768bbcd8364070fef58 | [
"MIT"
] | 5 | 2016-09-21T20:17:37.000Z | 2016-10-01T09:21:32.000Z | elasticsearch_loader/iter.py | MosheZada/elasticsearch_loader | 46250f0e665185394b102768bbcd8364070fef58 | [
"MIT"
] | null | null | null | elasticsearch_loader/iter.py | MosheZada/elasticsearch_loader | 46250f0e665185394b102768bbcd8364070fef58 | [
"MIT"
] | null | null | null | from itertools import zip_longest as zip_longest
from .parsers import json
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks."""
    # One shared iterator repeated n times makes zip_longest pull n
    # consecutive elements per output tuple, padding the last with fillvalue.
    shared = iter(iterable)
    return zip_longest(*([shared] * n), fillvalue=fillvalue)
def bulk_builder(bulk, config):
    """Yield one Elasticsearch bulk-action dict per truthy item in *bulk*.

    *config* supplies 'index', 'type', and the optional 'keys', 'id_field',
    'as_child', 'update' and 'pipeline' settings.
    """
    wanted_keys = config['keys']
    # Materialize and drop falsy entries up front (e.g. grouper fill values).
    for doc in [entry for entry in bulk if entry]:
        if wanted_keys:
            source = {k: v for k, v in doc.items() if k in wanted_keys}
        else:
            source = doc
        action = {'_index': config['index'],
                  '_type': config['type'],
                  '_source': source}
        if config['id_field']:
            action['_id'] = doc[config['id_field']]
        if config['as_child']:
            # Note: relies on '_id' having been set via id_field above.
            action['_parent'] = action['_id']
            action['_routing'] = action['_id']
        if config['update']:
            # default _op_type is 'index', which will overwrites existing doc
            action['_op_type'] = 'update'
            action['doc'] = source
            del action['_source']
        if config['pipeline']:
            action['pipeline'] = config['pipeline']
        yield action
def json_lines_iter(fle):
    """Lazily decode one JSON document per line of the open file *fle*."""
    yield from map(json.loads, fle)
| 27.090909 | 81 | 0.557047 |
acde9b3d412f47cca16556b124f32fa3902d0a95 | 4,877 | py | Python | src/garage/envs/hierarchical_rl_gym/jaco_catch.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | 1 | 2021-03-02T08:43:20.000Z | 2021-03-02T08:43:20.000Z | src/garage/envs/hierarchical_rl_gym/jaco_catch.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | null | null | null | src/garage/envs/hierarchical_rl_gym/jaco_catch.py | fangqyi/garage | ddafba385ef005f46f913ab352f9638760e5b412 | [
"MIT"
] | null | null | null | import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
from garage.envs.hierarchical_rl_gym.jaco import JacoEnv
class JacoCatchEnv(JacoEnv):
    """Jaco-arm MuJoCo task: catch a thrown box and hold it near a target.

    A box is thrown toward the arm at reset; the agent is rewarded for
    keeping the box in hand and in the air near ``self._target_pos`` for
    ``hold_duration`` consecutive steps, which counts as success.
    """

    def __init__(self, with_rot=1):
        """Initializes the environment, its reward config, and MuJoCo sim."""
        super().__init__(with_rot=with_rot)

        # config: reward scales and episode/randomization settings
        self._config.update({
            "catch_reward": 100,
            "hold_reward": 4,
            "hold_duration": 50,
            "random_throw": 1,
            "init_randomness": 0.01,
            "random_steps": 10,
        })

        # state: consecutive steps the box has been held, and hold target
        self._hold_duration = 0
        self._target_pos = [0.5, 0.2, 0.2]

        # env info: reward terms exposed to loggers, and observation spec
        self.reward_type += ["catch_reward", "hold_reward", "success"]
        self.ob_type = self.ob_shape.keys()

        mujoco_env.MujocoEnv.__init__(self, "jaco_pick.xml", 4)
        utils.EzPickle.__init__(self)

    def step(self, a):
        """Advances the sim one control step.

        Returns:
            (observation, reward, done, info) where reward is the sum of a
            control penalty, a per-step catch reward, and a hold reward that
            grows as the box approaches the target position.
        """
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        done = False
        success = False
        catch_reward = 0
        hold_reward = 0
        ctrl_reward = self._ctrl_reward(a)

        dist_box = self._get_distance_hand('box')
        box_z = self._get_box_pos()[2]
        # NOTE(review): 0.06 (grasp distance) and 0.05 (ground height) are
        # tuned thresholds in meters — presumably chosen for this model;
        # confirm against jaco_pick.xml geometry.
        in_hand = dist_box < 0.06
        in_air = box_z > 0.05
        on_ground = box_z <= 0.05

        # fail: box dropped to the ground ends the episode
        if on_ground:
            done = True

        # catch: reward every step the box is held in the air
        if in_air and in_hand:
            self._hold_duration += 1
            catch_reward = self._config["catch_reward"]
            dist = np.linalg.norm(self._target_pos - self._get_box_pos())
            hold_reward = self._config["hold_reward"] * (1 - dist)

        # success: held long enough
        if self._hold_duration == self._config['hold_duration']:
            print('success catch! {}'.format(self._get_box_pos()))
            done = success = True

        reward = ctrl_reward + catch_reward + hold_reward
        info = {"ctrl_reward": ctrl_reward,
                "catch_reward": catch_reward,
                "hold_reward": hold_reward,
                "success": success}
        return ob, reward, done, info

    def _throw_box(self):
        """Applies a one-step external force that lobs the box at the arm."""
        # set initial force: aim slightly above the arm base, with optional
        # random offsets scaled by the 'random_throw' config
        box_pos = self._get_box_pos()
        jaco_pos = self._get_pos('jaco_link_base')
        dx = 0.4 + np.random.uniform(0, 0.1) * self._config["random_throw"]
        dy = 0.3 + np.random.uniform(0, 0.1) * self._config["random_throw"]
        force = jaco_pos + [dx, dy, 1] - box_pos
        force = 110 * (force / np.linalg.norm(force))

        # apply force via MuJoCo's external cartesian force buffer
        box_body_idx = self.model.body_name2id('box')
        xfrc = self.data.xfrc_applied
        xfrc[box_body_idx, :3] = force
        self.do_simulation(self.unwrapped.action_space.sample(), self.frame_skip)

        # reset force so it acts for a single simulation call only
        xfrc[box_body_idx] = 0

    def _get_obs(self):
        """Returns a flat observation: qpos, clipped qvel, qacc, hand pos."""
        qpos = self.data.qpos
        qvel = self.data.qvel
        qacc = self.data.qacc
        hand_pos = self._get_hand_pos()
        return np.concatenate([qpos, np.clip(qvel, -30, 30), qacc, hand_pos]).ravel()

    def get_ob_dict(self, ob):
        """Splits a flat observation (or batch) into named components."""
        if len(ob.shape) > 1:
            return {
                'joint': ob[:, :31],
                'acc': ob[:, 31:46],
                'hand': ob[:, 46:49],
            }
        else:
            return {
                'joint': ob[:31],
                'acc': ob[31:46],
                'hand': ob[46:49],
            }

    def reset_model(self):
        """Resets the sim with small noise, perturbs it, and throws the box."""
        init_randomness = self._config["init_randomness"]
        qpos = self.init_qpos + np.random.uniform(low=-init_randomness,
                                                  high=init_randomness,
                                                  size=self.model.nq)
        qvel = self.init_qvel + np.random.uniform(low=-init_randomness,
                                                  high=init_randomness,
                                                  size=self.model.nv)
        # set box's initial position (behind and above the arm)
        qpos[9:12] = np.asarray([0, 2.0, 1.5])
        self.set_state(qpos, qvel)
        self._hold_duration = 0

        # more perturb: a few random-action steps before the throw
        for _ in range(int(self._config["random_steps"])):
            self.do_simulation(self.unwrapped.action_space.sample(), self.frame_skip)
        self._throw_box()

        return self._get_obs()

    def is_terminate(self, ob, success_length=50, init=False, env=None):
        """Stateful success check over a trajectory of observations.

        Call once with ``init=True`` to reset the counters, then once per
        observation; returns True once the box has been held in hand and in
        the air for ``success_length`` observations without touching ground.
        """
        if init:
            self.count_evaluate = 0
            self.success = True

        # Observation layout mirrors _get_obs: qpos[9:12] is the box
        # position, ob[46:49] the hand position.
        box_pos = ob[9:12]
        hand_pos = ob[46:49]

        dist_box = np.linalg.norm(box_pos - hand_pos)
        box_z = box_pos[2]
        in_hand = dist_box < 0.06
        in_air = box_z > 0.05
        on_ground = box_z <= 0.05

        if on_ground and self.count_evaluate > 0:
            self.success = False
        if in_air and in_hand:
            self.count_evaluate += 1

        return self.success and self.count_evaluate >= success_length
acde9b5323d11f8496bae9b11a20c1981a4da61a | 542 | py | Python | wavplayer.py | yamaccu/UIFLOW-wavPlay | 02b60d4370727094f12c6cabf38d9b39b0df4dce | [
"MIT"
] | 1 | 2021-06-25T00:12:45.000Z | 2021-06-25T00:12:45.000Z | wavplayer.py | yamaccu/UIFLOW-wavplayer | 02b60d4370727094f12c6cabf38d9b39b0df4dce | [
"MIT"
] | null | null | null | wavplayer.py | yamaccu/UIFLOW-wavplayer | 02b60d4370727094f12c6cabf38d9b39b0df4dce | [
"MIT"
] | null | null | null | from machine import I2S
from wav import wave
def playwav(filePath,volume):
wav = wave.open(filePath)
i2s = I2S(mode=I2S.MODE_MASTER | I2S.MODE_TX | I2S.MODE_DAC_BUILT_IN)
i2s.set_dac_mode(i2s.DAC_RIGHT_EN)
i2s.sample_rate(wav.getframerate())
i2s.bits(wav.getsampwidth() * 8)
i2s.nchannels(wav.getnchannels())
i2s.volume(volume)
while True:
data = wav.readframes(256)
if len(data) > 0:
i2s.write(data)
else:
wav.close()
i2s.deinit()
break | 28.526316 | 73 | 0.618081 |
acde9bff5592a091ec9d17d7d50f68ffe5ccbd79 | 56,351 | py | Python | official/vision/beta/projects/movinet/modeling/movinet_layers.py | pdelgado248/models | c0e277416002382975f30f536971f15170347dfe | [
"Apache-2.0"
] | 1 | 2021-06-08T07:29:35.000Z | 2021-06-08T07:29:35.000Z | official/vision/beta/projects/movinet/modeling/movinet_layers.py | saroz014/models | 6a9839f2cd30a42fa28cd75afd566f4479acb0e8 | [
"Apache-2.0"
] | null | null | null | official/vision/beta/projects/movinet/modeling/movinet_layers.py | saroz014/models | 6a9839f2cd30a42fa28cd75afd566f4479acb0e8 | [
"Apache-2.0"
] | 1 | 2021-06-11T03:42:27.000Z | 2021-06-11T03:42:27.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Contains common building blocks for MoViNets.
Reference: https://arxiv.org/pdf/2103.11511.pdf
"""
from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import tensorflow as tf
from official.vision.beta.modeling.layers import nn_layers
# Default kernel weight decay that may be overridden.
# Used below as the default L2 kernel_regularizer strength in ConvBlock.
KERNEL_WEIGHT_DECAY = 1.5e-5
def normalize_tuple(value: Union[int, Tuple[int, ...]], size: int, name: str):
  """Transforms a single integer or iterable of integers into an integer tuple.

  Arguments:
    value: The value to validate and convert. Could an int, or any iterable of
      ints.
    size: The size of the tuple to be returned.
    name: The name of the argument being validated, e.g. "strides" or
      "kernel_size". This is only used to format error messages.

  Returns:
    A tuple of `size` integers.

  Raises:
    ValueError: If something else than an int/long or iterable thereof was
      passed.
  """
  # Fast path: a bare int is broadcast across all `size` positions.
  if isinstance(value, int):
    return (value,) * size

  # Shared prefix of every error message raised below.
  base_msg = ('The `' + name + '` argument must be a tuple of ' + str(size) +
              ' integers. Received: ' + str(value))

  try:
    normalized = tuple(value)
  except TypeError:
    raise ValueError(base_msg)
  if len(normalized) != size:
    raise ValueError(base_msg)

  # Every element must be convertible to int.
  for element in normalized:
    try:
      int(element)
    except (ValueError, TypeError):
      raise ValueError(base_msg + ' including element ' + str(element) +
                       ' of type ' + str(type(element)))
  return normalized
@tf.keras.utils.register_keras_serializable(package='Vision')
class Squeeze3D(tf.keras.layers.Layer):
  """Squeeze3D layer to remove singular dimensions."""

  def call(self, inputs):
    """Calls the layer with the given inputs.

    Args:
      inputs: a 5D tensor whose axes 1, 2 and 3 are all of size 1
        (e.g. globally pooled video features).

    Returns:
      `inputs` with axes 1-3 removed, e.g. shape [batch, channels].
    """
    return tf.squeeze(inputs, axis=(1, 2, 3))
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobileConv2D(tf.keras.layers.Layer):
  """Conv2D layer with extra options to support mobile devices.

  Reshapes 5D video tensor inputs to 4D, allowing Conv2D to run across
  dimensions (2, 3) or (3, 4). Reshapes tensors back to 5D when returning the
  output.
  """

  def __init__(
      self,
      filters: int,
      kernel_size: Union[int, Sequence[int]],
      strides: Union[int, Sequence[int]] = (1, 1),
      padding: str = 'valid',
      data_format: Optional[str] = None,
      dilation_rate: Union[int, Sequence[int]] = (1, 1),
      groups: int = 1,
      activation: Optional[nn_layers.Activation] = None,
      use_bias: bool = True,
      kernel_initializer: tf.keras.initializers.Initializer = 'glorot_uniform',
      bias_initializer: tf.keras.initializers.Initializer = 'zeros',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      activity_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
      kernel_constraint: Optional[tf.keras.constraints.Constraint] = None,
      bias_constraint: Optional[tf.keras.constraints.Constraint] = None,
      use_depthwise: bool = False,
      use_temporal: bool = False,
      use_buffered_input: bool = False,
      **kwargs):  # pylint: disable=g-doc-args
    """Initializes mobile conv2d.

    For the majority of arguments, see tf.keras.layers.Conv2D.

    Args:
      use_depthwise: if True, use DepthwiseConv2D instead of Conv2D
      use_temporal: if True, apply Conv2D starting from the temporal dimension
        instead of the spatial dimensions.
      use_buffered_input: if True, the input is expected to be padded
        beforehand. In effect, calling this layer will use 'valid' padding on
        the temporal dimension to simulate 'causal' padding.
      **kwargs: keyword arguments to be passed to this layer.

    Returns:
      An output tensor of the MobileConv2D operation.
    """
    super(MobileConv2D, self).__init__(**kwargs)
    self._filters = filters
    self._kernel_size = kernel_size
    self._strides = strides
    self._padding = padding
    self._data_format = data_format
    self._dilation_rate = dilation_rate
    self._groups = groups
    self._activation = activation
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._kernel_regularizer = kernel_regularizer
    self._bias_regularizer = bias_regularizer
    self._activity_regularizer = activity_regularizer
    self._kernel_constraint = kernel_constraint
    self._bias_constraint = bias_constraint
    self._use_depthwise = use_depthwise
    self._use_temporal = use_temporal
    self._use_buffered_input = use_buffered_input

    kernel_size = normalize_tuple(kernel_size, 2, 'kernel_size')

    # A temporal conv is expressed as a (t, 1) 2D kernel; a spatial kernel
    # component here would be ambiguous after the reshape in call().
    if self._use_temporal and kernel_size[1] > 1:
      raise ValueError('Temporal conv with spatial kernel is not supported.')

    if use_depthwise:
      self._conv = nn_layers.DepthwiseConv2D(
          kernel_size=kernel_size,
          strides=strides,
          padding=padding,
          depth_multiplier=1,
          data_format=data_format,
          dilation_rate=dilation_rate,
          activation=activation,
          use_bias=use_bias,
          depthwise_initializer=kernel_initializer,
          bias_initializer=bias_initializer,
          depthwise_regularizer=kernel_regularizer,
          bias_regularizer=bias_regularizer,
          activity_regularizer=activity_regularizer,
          depthwise_constraint=kernel_constraint,
          bias_constraint=bias_constraint,
          use_buffered_input=use_buffered_input)
    else:
      self._conv = nn_layers.Conv2D(
          filters=filters,
          kernel_size=kernel_size,
          strides=strides,
          padding=padding,
          data_format=data_format,
          dilation_rate=dilation_rate,
          groups=groups,
          activation=activation,
          use_bias=use_bias,
          kernel_initializer=kernel_initializer,
          bias_initializer=bias_initializer,
          kernel_regularizer=kernel_regularizer,
          bias_regularizer=bias_regularizer,
          activity_regularizer=activity_regularizer,
          kernel_constraint=kernel_constraint,
          bias_constraint=bias_constraint,
          use_buffered_input=use_buffered_input)

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {
        'filters': self._filters,
        'kernel_size': self._kernel_size,
        'strides': self._strides,
        'padding': self._padding,
        'data_format': self._data_format,
        'dilation_rate': self._dilation_rate,
        'groups': self._groups,
        'activation': self._activation,
        'use_bias': self._use_bias,
        'kernel_initializer': self._kernel_initializer,
        'bias_initializer': self._bias_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'bias_regularizer': self._bias_regularizer,
        'activity_regularizer': self._activity_regularizer,
        'kernel_constraint': self._kernel_constraint,
        'bias_constraint': self._bias_constraint,
        'use_depthwise': self._use_depthwise,
        'use_temporal': self._use_temporal,
        'use_buffered_input': self._use_buffered_input,
    }
    base_config = super(MobileConv2D, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    """Calls the layer with the given inputs.

    The 5D input [batch, time, height, width, channels] is folded into a 4D
    tensor, convolved in 2D, and unfolded back to 5D.
    """
    if self._use_temporal:
      # Merge height*width into one axis so the 2D conv slides over
      # (time, height*width) with its (t, 1) kernel.
      input_shape = [
          tf.shape(inputs)[0],
          tf.shape(inputs)[1],
          tf.shape(inputs)[2] * tf.shape(inputs)[3],
          inputs.shape[4]]
    else:
      # Merge batch*time so the 2D conv runs per-frame over (height, width).
      input_shape = [
          tf.shape(inputs)[0] * tf.shape(inputs)[1],
          tf.shape(inputs)[2],
          tf.shape(inputs)[3],
          inputs.shape[4]]
    x = tf.reshape(inputs, input_shape)

    x = self._conv(x)

    # Unfold back to 5D, reading the possibly-strided dims from the conv
    # output and the untouched dims from the original input.
    if self._use_temporal:
      output_shape = [
          tf.shape(x)[0],
          tf.shape(x)[1],
          tf.shape(inputs)[2],
          tf.shape(inputs)[3],
          x.shape[3]]
    else:
      output_shape = [
          tf.shape(inputs)[0],
          tf.shape(inputs)[1],
          tf.shape(x)[1],
          tf.shape(x)[2],
          x.shape[3]]
    x = tf.reshape(x, output_shape)

    return x
@tf.keras.utils.register_keras_serializable(package='Vision')
class ConvBlock(tf.keras.layers.Layer):
  """A Conv followed by optional BatchNorm and Activation."""

  def __init__(
      self,
      filters: int,
      kernel_size: Union[int, Sequence[int]],
      strides: Union[int, Sequence[int]] = 1,
      depthwise: bool = False,
      causal: bool = False,
      use_bias: bool = False,
      kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] =
      tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY),
      use_batch_norm: bool = True,
      batch_norm_layer: tf.keras.layers.Layer =
      tf.keras.layers.experimental.SyncBatchNormalization,
      batch_norm_momentum: float = 0.99,
      batch_norm_epsilon: float = 1e-3,
      activation: Optional[Any] = None,
      conv_type: str = '3d',
      use_buffered_input: bool = False,
      **kwargs):
    """Initializes a conv block.

    Args:
      filters: filters for the conv operation.
      kernel_size: kernel size for the conv operation.
      strides: strides for the conv operation.
      depthwise: if True, use DepthwiseConv2D instead of Conv2D
      causal: if True, use causal mode for the conv operation.
      use_bias: use bias for the conv operation.
      kernel_initializer: kernel initializer for the conv operation.
      kernel_regularizer: kernel regularizer for the conv operation.
      use_batch_norm: if True, apply batch norm after the conv operation.
      batch_norm_layer: class to use for batch norm, if applied.
      batch_norm_momentum: momentum of the batch norm operation, if applied.
      batch_norm_epsilon: epsilon of the batch norm operation, if applied.
      activation: activation after the conv and batch norm operations.
      conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' split any 3D ops into two sequential 2D ops with their
        own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
        uses two sequential 3D ops instead.
      use_buffered_input: if True, the input is expected to be padded
        beforehand. In effect, calling this layer will use 'valid' padding on
        the temporal dimension to simulate 'causal' padding.
      **kwargs: keyword arguments to be passed to this layer.

    Returns:
      An output tensor of the ConvBlock operation.
    """

    super(ConvBlock, self).__init__(**kwargs)
    # Normalize scalar kernel/stride specs to 3-tuples (time, height, width).
    kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size')
    strides = normalize_tuple(strides, 3, 'strides')

    self._filters = filters
    self._kernel_size = kernel_size
    self._strides = strides
    self._depthwise = depthwise
    self._causal = causal
    self._use_bias = use_bias
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._use_batch_norm = use_batch_norm
    self._batch_norm_layer = batch_norm_layer
    self._batch_norm_momentum = batch_norm_momentum
    self._batch_norm_epsilon = batch_norm_epsilon
    self._activation = activation
    self._conv_type = conv_type
    self._use_buffered_input = use_buffered_input

    if activation is not None:
      self._activation_layer = tf.keras.layers.Activation(activation)
    else:
      self._activation_layer = None

    # Set in build() once the input channel count is known.
    self._groups = None

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    # NOTE(review): 'batch_norm_layer' is not serialized here, so a layer
    # restored from config falls back to the default class — confirm this is
    # intentional.
    config = {
        'filters': self._filters,
        'kernel_size': self._kernel_size,
        'strides': self._strides,
        'depthwise': self._depthwise,
        'causal': self._causal,
        'use_bias': self._use_bias,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'use_batch_norm': self._use_batch_norm,
        'batch_norm_momentum': self._batch_norm_momentum,
        'batch_norm_epsilon': self._batch_norm_epsilon,
        'activation': self._activation,
        'conv_type': self._conv_type,
        'use_buffered_input': self._use_buffered_input,
    }
    base_config = super(ConvBlock, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def build(self, input_shape):
    """Builds the layer with the given input shape."""
    padding = 'causal' if self._causal else 'same'
    # Depthwise mode is implemented as a grouped conv with one group per
    # input channel.
    self._groups = input_shape[-1] if self._depthwise else 1

    self._conv_temporal = None
    if self._conv_type == '3d_2plus1d' and self._kernel_size[0] > 1:
      # Factorized 3D: a spatial (1, kh, kw) conv followed by a temporal
      # (kt, 1, 1) conv, each 3D ops.
      self._conv = nn_layers.Conv3D(
          self._filters,
          (1, self._kernel_size[1], self._kernel_size[2]),
          strides=(1, self._strides[1], self._strides[2]),
          padding='same',
          groups=self._groups,
          use_bias=self._use_bias,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          use_buffered_input=False,
          name='conv3d')
      self._conv_temporal = nn_layers.Conv3D(
          self._filters,
          (self._kernel_size[0], 1, 1),
          strides=(self._strides[0], 1, 1),
          padding=padding,
          groups=self._groups,
          use_bias=self._use_bias,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          use_buffered_input=self._use_buffered_input,
          name='conv3d_temporal')
    elif self._conv_type == '2plus1d':
      # Mobile-friendly factorization using reshaped 2D convs.
      self._conv = MobileConv2D(
          self._filters,
          (self._kernel_size[1], self._kernel_size[2]),
          strides=(self._strides[1], self._strides[2]),
          padding='same',
          use_depthwise=self._depthwise,
          groups=self._groups,
          use_bias=self._use_bias,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          use_buffered_input=False,
          name='conv2d')
      if self._kernel_size[0] > 1:
        self._conv_temporal = MobileConv2D(
            self._filters,
            (self._kernel_size[0], 1),
            strides=(self._strides[0], 1),
            padding=padding,
            use_temporal=True,
            use_depthwise=self._depthwise,
            groups=self._groups,
            use_bias=self._use_bias,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer,
            use_buffered_input=self._use_buffered_input,
            name='conv2d_temporal')
    else:
      # Plain (unfactorized) 3D convolution.
      self._conv = nn_layers.Conv3D(
          self._filters,
          self._kernel_size,
          strides=self._strides,
          padding=padding,
          groups=self._groups,
          use_bias=self._use_bias,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          use_buffered_input=self._use_buffered_input,
          name='conv3d')

    self._batch_norm = None
    self._batch_norm_temporal = None

    if self._use_batch_norm:
      self._batch_norm = self._batch_norm_layer(
          momentum=self._batch_norm_momentum,
          epsilon=self._batch_norm_epsilon,
          name='bn')

      if self._conv_type != '3d' and self._conv_temporal is not None:
        # Factorized variants get their own norm after the temporal conv.
        self._batch_norm_temporal = self._batch_norm_layer(
            momentum=self._batch_norm_momentum,
            epsilon=self._batch_norm_epsilon,
            name='bn_temporal')

    super(ConvBlock, self).build(input_shape)

  def call(self, inputs):
    """Calls the layer with the given inputs.

    Applies conv -> (bn) -> (activation), and when a temporal conv exists,
    a second conv -> (bn) -> (activation) pass.
    """
    x = inputs

    x = self._conv(x)
    if self._batch_norm is not None:
      x = self._batch_norm(x)
    if self._activation_layer is not None:
      x = self._activation_layer(x)

    if self._conv_temporal is not None:
      x = self._conv_temporal(x)
      if self._batch_norm_temporal is not None:
        x = self._batch_norm_temporal(x)
      if self._activation_layer is not None:
        x = self._activation_layer(x)

    return x
@tf.keras.utils.register_keras_serializable(package='Vision')
class StreamBuffer(tf.keras.layers.Layer):
  """Caches trailing input frames across calls for streaming inference.

  On each call, the cached frames (all zeros on the first call) are
  prepended to the current clip along the time axis, and the last
  `buffer_size` frames of the combined tensor are written back into the
  state dict for the next call.
  """

  def __init__(self,
               buffer_size: int,
               state_prefix: Optional[str] = None,
               **kwargs):
    """Initializes a stream buffer.

    Args:
      buffer_size: the number of input frames to cache.
      state_prefix: a prefix string to identify states.
      **kwargs: keyword arguments to be passed to this layer.

    Returns:
      A output tensor of the StreamBuffer operation.
    """
    super(StreamBuffer, self).__init__(**kwargs)
    if state_prefix is None:
      state_prefix = ''
    self._state_prefix = state_prefix
    self._state_name = f'{state_prefix}/stream_buffer'
    self._buffer_size = buffer_size

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = super(StreamBuffer, self).get_config()
    config.update({
        'buffer_size': self._buffer_size,
        'state_prefix': self._state_prefix,
    })
    return config

  def call(
      self,
      inputs: tf.Tensor,
      states: Optional[nn_layers.States] = None,
  ) -> Tuple[Any, nn_layers.States]:
    """Calls the layer with the given inputs.

    Args:
      inputs: the input tensor.
      states: a dict of states such that, if any of the keys match for this
        layer, will overwrite the contents of the buffer(s).
        Expected keys include `state_prefix + '/stream_buffer'`.

    Returns:
      the output tensor and states
    """
    states = {} if states is None else dict(states)
    prev_frames = states.get(self._state_name)

    # First call: start from an all-zeros buffer shaped
    # [batch_size, buffer_size, input_height, input_width, num_channels].
    if prev_frames is None:
      input_shape = tf.shape(inputs)
      prev_frames = tf.zeros(
          [input_shape[0], self._buffer_size, input_shape[2], input_shape[3],
           input_shape[4]],
          dtype=inputs.dtype)

    # tf.pad has limited support for tf lite, so use tf.concat instead.
    full_inputs = tf.concat([prev_frames, inputs], axis=1)

    # Keep only the trailing `buffer_size` frames for the next call. When the
    # buffer is longer than the current clip, the remainder carries over from
    # the previous buffer.
    states[self._state_name] = full_inputs[:, -self._buffer_size:]
    return full_inputs, states
@tf.keras.utils.register_keras_serializable(package='Vision')
class StreamConvBlock(ConvBlock):
  """ConvBlock with StreamBuffer."""

  def __init__(
      self,
      filters: int,
      kernel_size: Union[int, Sequence[int]],
      strides: Union[int, Sequence[int]] = 1,
      depthwise: bool = False,
      causal: bool = False,
      use_bias: bool = False,
      kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
      .regularizers.L2(KERNEL_WEIGHT_DECAY),
      use_batch_norm: bool = True,
      batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.experimental
      .SyncBatchNormalization,
      batch_norm_momentum: float = 0.99,
      batch_norm_epsilon: float = 1e-3,
      activation: Optional[Any] = None,
      conv_type: str = '3d',
      state_prefix: Optional[str] = None,
      **kwargs):
    """Initializes a stream conv block.

    Args:
      filters: filters for the conv operation.
      kernel_size: kernel size for the conv operation.
      strides: strides for the conv operation.
      depthwise: if True, use DepthwiseConv2D instead of Conv2D
      causal: if True, use causal mode for the conv operation.
      use_bias: use bias for the conv operation.
      kernel_initializer: kernel initializer for the conv operation.
      kernel_regularizer: kernel regularizer for the conv operation.
      use_batch_norm: if True, apply batch norm after the conv operation.
      batch_norm_layer: class to use for batch norm, if applied.
      batch_norm_momentum: momentum of the batch norm operation, if applied.
      batch_norm_epsilon: epsilon of the batch norm operation, if applied.
      activation: activation after the conv and batch norm operations.
      conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' split any 3D ops into two sequential 2D ops with their
        own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
        uses two sequential 3D ops instead.
      state_prefix: a prefix string to identify states.
      **kwargs: keyword arguments to be passed to this layer.

    Returns:
      A output tensor of the StreamConvBlock operation.
    """
    kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size')
    # Number of past frames the temporal conv needs to see for causal padding:
    # the temporal kernel extent minus the current frame.
    buffer_size = kernel_size[0] - 1
    # Only buffer when running causally AND the conv actually looks back in
    # time; a temporal kernel of 1 needs no state.
    use_buffer = buffer_size > 0 and causal

    self._state_prefix = state_prefix

    # use_buffered_input tells the wrapped conv that causal temporal padding
    # has already been supplied (by the StreamBuffer below), so it must not
    # pad again.
    super(StreamConvBlock, self).__init__(
        filters,
        kernel_size,
        strides=strides,
        depthwise=depthwise,
        causal=causal,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        use_batch_norm=use_batch_norm,
        batch_norm_layer=batch_norm_layer,
        batch_norm_momentum=batch_norm_momentum,
        batch_norm_epsilon=batch_norm_epsilon,
        activation=activation,
        conv_type=conv_type,
        use_buffered_input=use_buffer,
        **kwargs)

    # Stream buffer that caches the previous `buffer_size` frames, or None
    # when no temporal state is required.
    self._stream_buffer = None
    if use_buffer:
      self._stream_buffer = StreamBuffer(
          buffer_size=buffer_size, state_prefix=state_prefix)

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {'state_prefix': self._state_prefix}
    base_config = super(StreamConvBlock, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self,
           inputs: tf.Tensor,
           states: Optional[nn_layers.States] = None
          ) -> Tuple[tf.Tensor, nn_layers.States]:
    """Calls the layer with the given inputs.

    Args:
      inputs: the input tensor.
      states: a dict of states such that, if any of the keys match for this
        layer, will overwrite the contents of the buffer(s).

    Returns:
      the output tensor and states
    """
    states = dict(states) if states is not None else {}

    x = inputs

    # If we have no separate temporal conv, use the buffer before the 3D conv.
    if self._stream_buffer is not None:
      x, states = self._stream_buffer(x, states=states)

    x = super(StreamConvBlock, self).call(x)

    return x, states
@tf.keras.utils.register_keras_serializable(package='Vision')
class StreamSqueezeExcitation(tf.keras.layers.Layer):
  """Squeeze and excitation layer with causal mode.

  Reference: https://arxiv.org/pdf/1709.01507.pdf
  """

  def __init__(
      self,
      hidden_filters: int,
      activation: nn_layers.Activation = 'swish',
      gating_activation: nn_layers.Activation = 'sigmoid',
      causal: bool = False,
      conv_type: str = '3d',
      kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
      .regularizers.L2(KERNEL_WEIGHT_DECAY),
      use_positional_encoding: bool = False,
      state_prefix: Optional[str] = None,
      **kwargs):
    """Implementation for squeeze and excitation.

    Args:
      hidden_filters: The hidden filters of squeeze excite.
      activation: name of the activation function.
      gating_activation: name of the activation function for gating.
      causal: if True, use causal mode in the global average pool.
      conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' split any 3D ops into two sequential 2D ops with their
        own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
        uses two sequential 3D ops instead.
      kernel_initializer: kernel initializer for the conv operations.
      kernel_regularizer: kernel regularizer for the conv operation.
      use_positional_encoding: add a positional encoding after the (cumulative)
        global average pooling layer.
      state_prefix: a prefix string to identify states.
      **kwargs: keyword arguments to be passed to this layer.
    """
    super(StreamSqueezeExcitation, self).__init__(**kwargs)
    self._hidden_filters = hidden_filters
    self._activation = activation
    self._gating_activation = gating_activation
    self._causal = causal
    self._conv_type = conv_type
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._use_positional_encoding = use_positional_encoding
    self._state_prefix = state_prefix

    # In causal mode the pool emits a cumulative average per frame, keeping
    # streaming inference consistent frame-for-frame with clip inference.
    self._pool = nn_layers.GlobalAveragePool3D(
        keepdims=True, causal=causal, state_prefix=state_prefix)

    self._pos_encoding = None
    if use_positional_encoding:
      self._pos_encoding = nn_layers.PositionalEncoding(
          initializer='zeros', state_prefix=state_prefix)

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {
        'hidden_filters': self._hidden_filters,
        'activation': self._activation,
        'gating_activation': self._gating_activation,
        'causal': self._causal,
        'conv_type': self._conv_type,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'use_positional_encoding': self._use_positional_encoding,
        'state_prefix': self._state_prefix,
    }
    base_config = super(StreamSqueezeExcitation, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def build(self, input_shape):
    """Builds the layer with the given input shape."""
    # Pointwise bottleneck down to `hidden_filters` channels.
    self._se_reduce = ConvBlock(
        filters=self._hidden_filters,
        kernel_size=1,
        causal=self._causal,
        use_bias=True,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        use_batch_norm=False,
        activation=self._activation,
        conv_type=self._conv_type,
        name='se_reduce')

    # Pointwise expansion back to the input channel count, producing the
    # per-channel gating weights via `gating_activation`.
    self._se_expand = ConvBlock(
        filters=input_shape[-1],
        kernel_size=1,
        causal=self._causal,
        use_bias=True,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        use_batch_norm=False,
        activation=self._gating_activation,
        conv_type=self._conv_type,
        name='se_expand')

    super(StreamSqueezeExcitation, self).build(input_shape)

  def call(self,
           inputs: tf.Tensor,
           states: Optional[nn_layers.States] = None
          ) -> Tuple[tf.Tensor, nn_layers.States]:
    """Calls the layer with the given inputs.

    Args:
      inputs: the input tensor.
      states: a dict of states such that, if any of the keys match for this
        layer, will overwrite the contents of the buffer(s).

    Returns:
      the output tensor and states
    """
    states = dict(states) if states is not None else {}

    x, states = self._pool(inputs, states=states)

    if self._pos_encoding is not None:
      x, states = self._pos_encoding(x, states=states)

    x = self._se_reduce(x)
    x = self._se_expand(x)

    # Gate the input: broadcast-multiply the channel weights over the input.
    return x * inputs, states
@tf.keras.utils.register_keras_serializable(package='Vision')
class MobileBottleneck(tf.keras.layers.Layer):
  """A depthwise inverted bottleneck block.

  Uses dependency injection to allow flexible definition of different layers
  within this block.
  """

  def __init__(self,
               expansion_layer: tf.keras.layers.Layer,
               feature_layer: tf.keras.layers.Layer,
               projection_layer: tf.keras.layers.Layer,
               attention_layer: Optional[tf.keras.layers.Layer] = None,
               skip_layer: Optional[tf.keras.layers.Layer] = None,
               stochastic_depth_drop_rate: Optional[float] = None,
               **kwargs):
    """Implementation for mobile bottleneck.

    Args:
      expansion_layer: initial layer used for pointwise expansion.
      feature_layer: main layer used for computing 3D features.
      projection_layer: layer used for pointwise projection.
      attention_layer: optional layer used for attention-like operations (e.g.,
        squeeze excite). Skipped when None.
      skip_layer: optional skip layer used to project the input before summing
        with the output for the residual connection.
      stochastic_depth_drop_rate: optional drop rate for stochastic depth.
      **kwargs: keyword arguments to be passed to this layer.
    """
    super(MobileBottleneck, self).__init__(**kwargs)
    # NOTE(review): attribute assignment order is kept as-is since Keras
    # tracks sublayers by assignment, which can affect checkpoint ordering.
    self._projection_layer = projection_layer
    self._attention_layer = attention_layer
    self._skip_layer = skip_layer
    self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
    self._identity = tf.keras.layers.Activation(tf.identity)
    # ReZero: the residual branch is scaled by a learnable scalar initialized
    # to zero, so the block starts out as an identity mapping.
    self._rezero = nn_layers.Scale(initializer='zeros', name='rezero')

    if stochastic_depth_drop_rate:
      self._stochastic_depth = nn_layers.StochasticDepth(
          stochastic_depth_drop_rate, name='stochastic_depth')
    else:
      self._stochastic_depth = None
    self._feature_layer = feature_layer
    self._expansion_layer = expansion_layer

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {
        'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
    }
    base_config = super(MobileBottleneck, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self,
           inputs: tf.Tensor,
           states: Optional[nn_layers.States] = None
          ) -> Tuple[tf.Tensor, nn_layers.States]:
    """Calls the layer with the given inputs.

    Args:
      inputs: the input tensor.
      states: a dict of states such that, if any of the keys match for this
        layer, will overwrite the contents of the buffer(s).

    Returns:
      the output tensor and states
    """
    states = dict(states) if states is not None else {}

    x = self._expansion_layer(inputs)
    x, states = self._feature_layer(x, states=states)
    # Fix: `attention_layer` is documented as optional and defaults to None,
    # so it must be guarded before being called.
    if self._attention_layer is not None:
      x, states = self._attention_layer(x, states=states)
    x = self._projection_layer(x)

    # Add identity so that the ops are ordered as written. This is useful for,
    # e.g., quantization.
    x = self._identity(x)
    x = self._rezero(x)

    if self._stochastic_depth is not None:
      x = self._stochastic_depth(x)

    if self._skip_layer is not None:
      skip = self._skip_layer(inputs)
    else:
      skip = inputs

    return x + skip, states
@tf.keras.utils.register_keras_serializable(package='Vision')
class SkipBlock(tf.keras.layers.Layer):
  """Skip block for bottleneck blocks."""

  def __init__(
      self,
      out_filters: int,
      downsample: bool = False,
      conv_type: str = '3d',
      kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] =
      tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY),
      batch_norm_layer: tf.keras.layers.Layer =
      tf.keras.layers.experimental.SyncBatchNormalization,
      batch_norm_momentum: float = 0.99,
      batch_norm_epsilon: float = 1e-3,
      **kwargs):
    """Implementation for skip block.

    Args:
      out_filters: the number of projected output filters.
      downsample: if True, downsamples the input by a factor of 2 by applying
        average pooling with a 3x3 kernel size on the spatial dimensions.
      conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' split any 3D ops into two sequential 2D ops with their
        own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
        uses two sequential 3D ops instead.
      kernel_initializer: kernel initializer for the conv operations.
      kernel_regularizer: kernel regularizer for the conv projection.
      batch_norm_layer: class to use for batch norm.
      batch_norm_momentum: momentum of the batch norm operation.
      batch_norm_epsilon: epsilon of the batch norm operation.
      **kwargs: keyword arguments to be passed to this layer.
    """
    super(SkipBlock, self).__init__(**kwargs)
    self._out_filters = out_filters
    self._downsample = downsample
    self._conv_type = conv_type
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._batch_norm_layer = batch_norm_layer
    self._batch_norm_momentum = batch_norm_momentum
    self._batch_norm_epsilon = batch_norm_epsilon

    # 1x1(x1) pointwise projection to match the residual channel count.
    self._projection = ConvBlock(
        filters=self._out_filters,
        kernel_size=1,
        conv_type=conv_type,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        use_batch_norm=True,
        batch_norm_layer=self._batch_norm_layer,
        batch_norm_momentum=self._batch_norm_momentum,
        batch_norm_epsilon=self._batch_norm_epsilon,
        name='skip_project')

    if downsample:
      if self._conv_type == '2plus1d':
        # 2D pool: the 5D input is reshaped to 4D in call() before pooling.
        self._pool = tf.keras.layers.AveragePooling2D(
            pool_size=(3, 3),
            strides=(2, 2),
            padding='same',
            name='skip_pool')
      else:
        # 3D pool over spatial dims only (temporal kernel/stride of 1).
        self._pool = tf.keras.layers.AveragePooling3D(
            pool_size=(1, 3, 3),
            strides=(1, 2, 2),
            padding='same',
            name='skip_pool')
    else:
      self._pool = None

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {
        'out_filters': self._out_filters,
        'downsample': self._downsample,
        'conv_type': self._conv_type,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'batch_norm_momentum': self._batch_norm_momentum,
        'batch_norm_epsilon': self._batch_norm_epsilon,
    }
    base_config = super(SkipBlock, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs):
    """Calls the layer with the given inputs."""
    x = inputs
    if self._pool is not None:
      if self._conv_type == '2plus1d':
        # Fold batch and time into one axis so the 2D pool can be applied:
        # [B, T, H, W, C] -> [B*T, H, W, C].
        x = tf.reshape(x, [-1, tf.shape(x)[2], tf.shape(x)[3], x.shape[4]])

      x = self._pool(x)

      if self._conv_type == '2plus1d':
        # Unfold back to 5D using the original batch size:
        # [B*T, H', W', C] -> [B, T, H', W', C].
        x = tf.reshape(
            x,
            [tf.shape(inputs)[0], -1, tf.shape(x)[1],
             tf.shape(x)[2], x.shape[3]])
    return self._projection(x)
@tf.keras.utils.register_keras_serializable(package='Vision')
class MovinetBlock(tf.keras.layers.Layer):
  """A basic block for MoViNets.

  Applies a mobile inverted bottleneck with pointwise expansion, 3D depthwise
  convolution, 3D squeeze excite, pointwise projection, and residual connection.
  """

  def __init__(
      self,
      out_filters: int,
      expand_filters: int,
      kernel_size: Union[int, Sequence[int]] = (3, 3, 3),
      strides: Union[int, Sequence[int]] = (1, 1, 1),
      causal: bool = False,
      activation: nn_layers.Activation = 'swish',
      se_ratio: float = 0.25,
      stochastic_depth_drop_rate: float = 0.,
      conv_type: str = '3d',
      use_positional_encoding: bool = False,
      kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
      .regularizers.L2(KERNEL_WEIGHT_DECAY),
      batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.experimental
      .SyncBatchNormalization,
      batch_norm_momentum: float = 0.99,
      batch_norm_epsilon: float = 1e-3,
      state_prefix: Optional[str] = None,
      **kwargs):
    """Implementation for MoViNet block.

    Args:
      out_filters: number of output filters for the final projection.
      expand_filters: number of expansion filters after the input.
      kernel_size: kernel size of the main depthwise convolution.
      strides: strides of the main depthwise convolution.
      causal: if True, run the temporal convolutions in causal mode.
      activation: activation to use across all conv operations.
      se_ratio: squeeze excite filters ratio.
      stochastic_depth_drop_rate: optional drop rate for stochastic depth.
      conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' split any 3D ops into two sequential 2D ops with their
        own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
        uses two sequential 3D ops instead.
      use_positional_encoding: add a positional encoding after the (cumulative)
        global average pooling layer in the squeeze excite layer.
      kernel_initializer: kernel initializer for the conv operations.
      kernel_regularizer: kernel regularizer for the conv operations.
      batch_norm_layer: class to use for batch norm.
      batch_norm_momentum: momentum of the batch norm operation.
      batch_norm_epsilon: epsilon of the batch norm operation.
      state_prefix: a prefix string to identify states.
      **kwargs: keyword arguments to be passed to this layer.
    """
    super(MovinetBlock, self).__init__(**kwargs)

    # Normalize scalar kernel/strides to 3-tuples once; normalize_tuple is
    # idempotent, so serialized configs round-trip cleanly through get_config.
    # Fix: the original code re-assigned self._kernel_size to the raw argument
    # below, discarding this normalization.
    self._kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size')
    self._strides = normalize_tuple(strides, 3, 'strides')

    # Squeeze-excite hidden width, rounded to a multiple of 8 for hardware
    # efficiency.
    se_hidden_filters = nn_layers.make_divisible(
        se_ratio * expand_filters, divisor=8)
    self._out_filters = out_filters
    self._expand_filters = expand_filters
    self._causal = causal
    self._activation = activation
    self._se_ratio = se_ratio
    # Any stride > 1 (temporal or spatial) means the skip path must also
    # downsample/project.
    self._downsample = any(s > 1 for s in self._strides)
    self._stochastic_depth_drop_rate = stochastic_depth_drop_rate
    self._conv_type = conv_type
    self._use_positional_encoding = use_positional_encoding
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._batch_norm_layer = batch_norm_layer
    self._batch_norm_momentum = batch_norm_momentum
    self._batch_norm_epsilon = batch_norm_epsilon
    self._state_prefix = state_prefix

    # Pointwise expansion from input channels to expand_filters.
    self._expansion = ConvBlock(
        expand_filters,
        (1, 1, 1),
        activation=activation,
        conv_type=conv_type,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        use_batch_norm=True,
        batch_norm_layer=self._batch_norm_layer,
        batch_norm_momentum=self._batch_norm_momentum,
        batch_norm_epsilon=self._batch_norm_epsilon,
        name='expansion')
    # Depthwise (streaming-capable) 3D feature convolution.
    self._feature = StreamConvBlock(
        expand_filters,
        self._kernel_size,
        strides=self._strides,
        depthwise=True,
        causal=self._causal,
        activation=activation,
        conv_type=conv_type,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        use_batch_norm=True,
        batch_norm_layer=self._batch_norm_layer,
        batch_norm_momentum=self._batch_norm_momentum,
        batch_norm_epsilon=self._batch_norm_epsilon,
        state_prefix=state_prefix,
        name='feature')
    # Pointwise projection down to out_filters; no activation (linear
    # bottleneck).
    self._projection = ConvBlock(
        out_filters,
        (1, 1, 1),
        activation=None,
        conv_type=conv_type,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        use_batch_norm=True,
        batch_norm_layer=self._batch_norm_layer,
        batch_norm_momentum=self._batch_norm_momentum,
        batch_norm_epsilon=self._batch_norm_epsilon,
        name='projection')
    # Squeeze-excite attention over the expanded features.
    self._attention = StreamSqueezeExcitation(
        se_hidden_filters,
        activation=activation,
        causal=self._causal,
        conv_type=conv_type,
        use_positional_encoding=use_positional_encoding,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        state_prefix=state_prefix,
        name='se')

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {
        'out_filters': self._out_filters,
        'expand_filters': self._expand_filters,
        'kernel_size': self._kernel_size,
        'strides': self._strides,
        'causal': self._causal,
        'activation': self._activation,
        'se_ratio': self._se_ratio,
        'stochastic_depth_drop_rate': self._stochastic_depth_drop_rate,
        'conv_type': self._conv_type,
        'use_positional_encoding': self._use_positional_encoding,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'batch_norm_momentum': self._batch_norm_momentum,
        'batch_norm_epsilon': self._batch_norm_epsilon,
        'state_prefix': self._state_prefix,
    }
    base_config = super(MovinetBlock, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def build(self, input_shape):
    """Builds the layer with the given input shape."""
    # The residual needs a projection/downsample only when channels or
    # spatial/temporal resolution change across the block.
    if input_shape[-1] == self._out_filters and not self._downsample:
      self._skip = None
    else:
      self._skip = SkipBlock(
          self._out_filters,
          downsample=self._downsample,
          conv_type=self._conv_type,
          kernel_initializer=self._kernel_initializer,
          kernel_regularizer=self._kernel_regularizer,
          name='skip')

    self._mobile_bottleneck = MobileBottleneck(
        self._expansion,
        self._feature,
        self._projection,
        attention_layer=self._attention,
        skip_layer=self._skip,
        stochastic_depth_drop_rate=self._stochastic_depth_drop_rate,
        name='bneck')

    super(MovinetBlock, self).build(input_shape)

  def call(self,
           inputs: tf.Tensor,
           states: Optional[nn_layers.States] = None
          ) -> Tuple[tf.Tensor, nn_layers.States]:
    """Calls the layer with the given inputs.

    Args:
      inputs: the input tensor.
      states: a dict of states such that, if any of the keys match for this
        layer, will overwrite the contents of the buffer(s).

    Returns:
      the output tensor and states
    """
    states = dict(states) if states is not None else {}
    return self._mobile_bottleneck(inputs, states=states)
@tf.keras.utils.register_keras_serializable(package='Vision')
class Stem(tf.keras.layers.Layer):
  """Stem layer for video networks.

  Applies an initial convolution block operation.
  """

  def __init__(
      self,
      out_filters: int,
      kernel_size: Union[int, Sequence[int]],
      strides: Union[int, Sequence[int]] = (1, 1, 1),
      causal: bool = False,
      conv_type: str = '3d',
      activation: nn_layers.Activation = 'swish',
      kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
      .regularizers.L2(KERNEL_WEIGHT_DECAY),
      batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.experimental
      .SyncBatchNormalization,
      batch_norm_momentum: float = 0.99,
      batch_norm_epsilon: float = 1e-3,
      state_prefix: Optional[str] = None,
      **kwargs):
    """Implementation for video model stem.

    Args:
      out_filters: number of output filters.
      kernel_size: kernel size of the convolution.
      strides: strides of the convolution.
      causal: if True, run the temporal convolutions in causal mode.
      conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' split any 3D ops into two sequential 2D ops with their
        own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
        uses two sequential 3D ops instead.
      activation: the input activation name.
      kernel_initializer: kernel initializer for the conv operations.
      kernel_regularizer: kernel regularizer for the conv operations.
      batch_norm_layer: class to use for batch norm.
      batch_norm_momentum: momentum of the batch norm operation.
      batch_norm_epsilon: epsilon of the batch norm operation.
      state_prefix: a prefix string to identify states.
      **kwargs: keyword arguments to be passed to this layer.
    """
    super(Stem, self).__init__(**kwargs)
    self._out_filters = out_filters
    # Normalize scalar kernel/strides to 3-tuples up front so get_config
    # always serializes a consistent form.
    self._kernel_size = normalize_tuple(kernel_size, 3, 'kernel_size')
    self._strides = normalize_tuple(strides, 3, 'strides')
    self._causal = causal
    self._conv_type = conv_type
    self._activation = activation
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._batch_norm_layer = batch_norm_layer
    self._batch_norm_momentum = batch_norm_momentum
    self._batch_norm_epsilon = batch_norm_epsilon
    self._state_prefix = state_prefix

    # The stem is a single (streaming-capable) conv block; all work in call()
    # is delegated to it.
    self._stem = StreamConvBlock(
        filters=self._out_filters,
        kernel_size=self._kernel_size,
        strides=self._strides,
        causal=self._causal,
        activation=self._activation,
        conv_type=self._conv_type,
        kernel_initializer=self._kernel_initializer,
        kernel_regularizer=self._kernel_regularizer,
        use_batch_norm=True,
        batch_norm_layer=self._batch_norm_layer,
        batch_norm_momentum=self._batch_norm_momentum,
        batch_norm_epsilon=self._batch_norm_epsilon,
        state_prefix=self._state_prefix,
        name='stem')

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {
        'out_filters': self._out_filters,
        'kernel_size': self._kernel_size,
        'strides': self._strides,
        'causal': self._causal,
        'activation': self._activation,
        'conv_type': self._conv_type,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'batch_norm_momentum': self._batch_norm_momentum,
        'batch_norm_epsilon': self._batch_norm_epsilon,
        'state_prefix': self._state_prefix,
    }
    base_config = super(Stem, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self,
           inputs: tf.Tensor,
           states: Optional[nn_layers.States] = None
          ) -> Tuple[tf.Tensor, nn_layers.States]:
    """Calls the layer with the given inputs.

    Args:
      inputs: the input tensor.
      states: a dict of states such that, if any of the keys match for this
        layer, will overwrite the contents of the buffer(s).

    Returns:
      the output tensor and states
    """
    states = dict(states) if states is not None else {}
    return self._stem(inputs, states=states)
@tf.keras.utils.register_keras_serializable(package='Vision')
class Head(tf.keras.layers.Layer):
  """Head layer for video networks.

  Applies pointwise projection and global pooling.
  """

  def __init__(
      self,
      project_filters: int,
      conv_type: str = '3d',
      activation: nn_layers.Activation = 'swish',
      kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = tf.keras
      .regularizers.L2(KERNEL_WEIGHT_DECAY),
      batch_norm_layer: tf.keras.layers.Layer = tf.keras.layers.experimental
      .SyncBatchNormalization,
      batch_norm_momentum: float = 0.99,
      batch_norm_epsilon: float = 1e-3,
      state_prefix: Optional[str] = None,
      **kwargs):
    """Implementation for video model head.

    Args:
      project_filters: number of pointwise projection filters.
      conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' split any 3D ops into two sequential 2D ops with their
        own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
        uses two sequential 3D ops instead.
      activation: the input activation name.
      kernel_initializer: kernel initializer for the conv operations.
      kernel_regularizer: kernel regularizer for the conv operations.
      batch_norm_layer: class to use for batch norm.
      batch_norm_momentum: momentum of the batch norm operation.
      batch_norm_epsilon: epsilon of the batch norm operation.
      state_prefix: a prefix string to identify states.
      **kwargs: keyword arguments to be passed to this layer.
    """
    super(Head, self).__init__(**kwargs)
    self._project_filters = project_filters
    self._conv_type = conv_type
    self._activation = activation
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer
    self._batch_norm_layer = batch_norm_layer
    self._batch_norm_momentum = batch_norm_momentum
    self._batch_norm_epsilon = batch_norm_epsilon
    self._state_prefix = state_prefix

    # Pointwise projection before global pooling.
    # Fix: `kernel_initializer` was accepted, stored, and serialized but never
    # forwarded, so a caller's non-default initializer was silently ignored.
    self._project = ConvBlock(
        filters=project_filters,
        kernel_size=1,
        activation=activation,
        conv_type=conv_type,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        use_batch_norm=True,
        batch_norm_layer=self._batch_norm_layer,
        batch_norm_momentum=self._batch_norm_momentum,
        batch_norm_epsilon=self._batch_norm_epsilon,
        name='project')
    # Non-causal global average pool collapses (time, height, width) to 1s,
    # keeping rank 5 (keepdims=True) for the classifier head.
    self._pool = nn_layers.GlobalAveragePool3D(
        keepdims=True, causal=False, state_prefix=state_prefix)

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {
        'project_filters': self._project_filters,
        'conv_type': self._conv_type,
        'activation': self._activation,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
        'batch_norm_momentum': self._batch_norm_momentum,
        'batch_norm_epsilon': self._batch_norm_epsilon,
        'state_prefix': self._state_prefix,
    }
    base_config = super(Head, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(
      self,
      inputs: Union[tf.Tensor, Mapping[str, tf.Tensor]],
      states: Optional[nn_layers.States] = None,
  ) -> Tuple[tf.Tensor, nn_layers.States]:
    """Calls the layer with the given inputs.

    Args:
      inputs: the input tensor or dict of endpoints.
      states: a dict of states such that, if any of the keys match for this
        layer, will overwrite the contents of the buffer(s).

    Returns:
      the output tensor and states
    """
    states = dict(states) if states is not None else {}
    x = self._project(inputs)
    return self._pool(x, states=states)
@tf.keras.utils.register_keras_serializable(package='Vision')
class ClassifierHead(tf.keras.layers.Layer):
  """Head layer for video networks.

  Applies dense projection, dropout, and classifier projection. Expects input
  to be pooled vector with shape [batch_size, 1, 1, 1, num_channels]
  """

  def __init__(
      self,
      head_filters: int,
      num_classes: int,
      dropout_rate: float = 0.,
      conv_type: str = '3d',
      activation: nn_layers.Activation = 'swish',
      output_activation: Optional[nn_layers.Activation] = None,
      max_pool_predictions: bool = False,
      kernel_initializer: tf.keras.initializers.Initializer = 'HeNormal',
      kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] =
      tf.keras.regularizers.L2(KERNEL_WEIGHT_DECAY),
      **kwargs):
    """Implementation for video model classifier head.

    Args:
      head_filters: number of dense head projection filters.
      num_classes: number of output classes for the final logits.
      dropout_rate: the dropout rate applied to the head projection.
      conv_type: '3d', '2plus1d', or '3d_2plus1d'. '3d' uses the default 3D
        ops. '2plus1d' split any 3D ops into two sequential 2D ops with their
        own batch norm and activation. '3d_2plus1d' is like '2plus1d', but
        uses two sequential 3D ops instead.
      activation: the input activation name.
      output_activation: optional final activation (e.g., 'softmax').
      max_pool_predictions: apply temporal softmax pooling to predictions.
        Intended for multi-label prediction, where multiple labels are
        distributed across the video. Currently only supports single clips.
      kernel_initializer: kernel initializer for the conv operations.
      kernel_regularizer: kernel regularizer for the conv operations.
      **kwargs: keyword arguments to be passed to this layer.
    """
    super(ClassifierHead, self).__init__(**kwargs)

    # Constructor arguments are saved so get_config() can serialize the layer.
    self._head_filters = head_filters
    self._num_classes = num_classes
    self._dropout_rate = dropout_rate
    self._conv_type = conv_type
    self._output_activation = output_activation
    self._max_pool_predictions = max_pool_predictions
    self._kernel_initializer = kernel_initializer
    self._kernel_regularizer = kernel_regularizer

    self._dropout = tf.keras.layers.Dropout(dropout_rate)
    # Dense head projection, implemented as a 1x1x1 conv with bias and no
    # batch norm (unlike the trunk ConvBlocks).
    self._head = ConvBlock(
        filters=head_filters,
        kernel_size=1,
        activation=activation,
        use_bias=True,
        use_batch_norm=False,
        conv_type=conv_type,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='head')
    # Logits projection: note it uses a small random-normal initializer and
    # deliberately no regularizer, unlike the head projection above.
    self._classifier = ConvBlock(
        filters=num_classes,
        kernel_size=1,
        kernel_initializer=tf.keras.initializers.random_normal(stddev=0.01),
        kernel_regularizer=None,
        use_bias=True,
        use_batch_norm=False,
        conv_type=conv_type,
        name='classifier')
    self._max_pool = nn_layers.TemporalSoftmaxPool()
    # Squeeze3D presumably drops the three singleton axes of the pooled
    # [batch, 1, 1, 1, channels] input — defined elsewhere; confirm.
    self._squeeze = Squeeze3D()

    output_activation = output_activation if output_activation else 'linear'
    # Final activation runs in float32 so outputs stay full precision even
    # under a mixed-precision policy.
    self._cast = tf.keras.layers.Activation(
        output_activation, dtype='float32', name='cast')

  def get_config(self):
    """Returns a dictionary containing the config used for initialization."""
    config = {
        'head_filters': self._head_filters,
        'num_classes': self._num_classes,
        'dropout_rate': self._dropout_rate,
        'conv_type': self._conv_type,
        'output_activation': self._output_activation,
        'max_pool_predictions': self._max_pool_predictions,
        'kernel_initializer': self._kernel_initializer,
        'kernel_regularizer': self._kernel_regularizer,
    }
    base_config = super(ClassifierHead, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def call(self, inputs: tf.Tensor) -> tf.Tensor:
    """Calls the layer with the given inputs."""
    # Input Shape: [batch_size, 1, 1, 1, input_channels]
    x = inputs
    x = self._head(x)

    if self._dropout_rate and self._dropout_rate > 0:
      x = self._dropout(x)

    x = self._classifier(x)

    # Optional temporal softmax pooling over predictions (multi-label case).
    if self._max_pool_predictions:
      x = self._max_pool(x)

    x = self._squeeze(x)
    x = self._cast(x)
    return x
| 38.023617 | 80 | 0.682401 |
acde9c49f3ab865171de1355cdf53ed252e83531 | 51,310 | py | Python | test.py | rzambre/legion | 231f321c97849c9ad0ff58921de786730ca8243d | [
"Apache-2.0"
] | null | null | null | test.py | rzambre/legion | 231f321c97849c9ad0ff58921de786730ca8243d | [
"Apache-2.0"
] | null | null | null | test.py | rzambre/legion | 231f321c97849c9ad0ff58921de786730ca8243d | [
"Apache-2.0"
] | 1 | 2021-01-07T22:07:18.000Z | 2021-01-07T22:07:18.000Z | #!/usr/bin/env python
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import argparse, datetime, glob, json, multiprocessing, os, platform, shutil, subprocess, sys, traceback, tempfile
import signal
# The make binary to invoke for all builds; overridable via $MAKE.
make_exe = os.environ.get('MAKE', 'make')

# Find physical core count of the machine.
if platform.system() == 'Linux':
    # lscpu --parse=core lists one physical core id per logical CPU; the set
    # collapses hyperthread siblings.
    lines = subprocess.check_output(['lscpu', '--parse=core']).decode('utf-8')
    physical_cores = len(set(line for line in lines.strip().split('\n')
                             if not line.startswith('#')))
elif platform.system() == 'Darwin':
    physical_cores = int(
        subprocess.check_output(['sysctl', '-n', 'hw.physicalcpu']).decode('utf-8'))
elif platform.system() == 'FreeBSD':
    # NOTE(review): hw.ncpu counts logical CPUs; on SMT machines this may
    # exceed the physical core count — confirm.
    physical_cores = int(
        subprocess.check_output(['sysctl', '-n', 'hw.ncpu']).decode('utf-8'))
    make_exe = os.environ.get('MAKE', 'gmake') # default needs to be GNU make
else:
    raise Exception('Unknown platform: %s' % platform.system())

# Choose a reasonable number of application cores given the
# available physical cores.
app_cores = max(physical_cores - 2, 1)
# Test lists used by the run_test_* drivers below. Each entry is
# [path_to_test_relative_to_root, extra_command_line_flags].
legion_cxx_tests = [
    # Tutorial
    ['tutorial/00_hello_world/hello_world', []],
    ['tutorial/01_tasks_and_futures/tasks_and_futures', []],
    ['tutorial/02_index_tasks/index_tasks', []],
    ['tutorial/03_global_vars/global_vars', []],
    ['tutorial/04_logical_regions/logical_regions', []],
    ['tutorial/05_physical_regions/physical_regions', []],
    ['tutorial/06_privileges/privileges', []],
    ['tutorial/07_partitioning/partitioning', []],
    ['tutorial/08_multiple_partitions/multiple_partitions', []],
    ['tutorial/09_custom_mapper/custom_mapper', []],

    # Examples
    ['examples/circuit/circuit', []],
    ['examples/dynamic_registration/dynamic_registration', []],
    ['examples/ghost/ghost', ['-ll:cpu', '4']],
    ['examples/ghost_pull/ghost_pull', ['-ll:cpu', '4']],
    ['examples/realm_saxpy/realm_saxpy', []],
    ['examples/realm_stencil/realm_stencil', ['-ll:cpu', '4']],
    ['examples/spmd_cgsolver/spmd_cgsolver', ['-ll:cpu', '4', '-perproc']],
    ['examples/virtual_map/virtual_map', []],
    ['examples/attach_2darray_c_fortran_layout/attach_2darray', []],
    ['examples/attach_array_daxpy/attach_array_daxpy', []],
    # Comment this test out until it works everywhere
    #['examples/implicit_top_task/implicit_top_task', []],

    # Tests
    ['test/rendering/rendering', ['-i', '2', '-n', '64', '-ll:cpu', '4']],
    ['test/legion_stl/test_stl', []],
]

# Fortran-binding tutorials; only run when LEGION_USE_FORTRAN is enabled.
legion_fortran_tests = [
    ['tutorial/fortran/00_hello_world/hello_world_fortran', []],
    ['tutorial/fortran/01_tasks_and_futures/tasks_and_futures_fortran', []],
    ['tutorial/fortran/02_index_tasks/index_tasks_fortran', []],
    ['tutorial/fortran/03_physical_regions/physical_regions_fortran', []],
    ['tutorial/fortran/04_privileges_accessor/privileges_accessor_fortran', []],
    ['tutorial/fortran/05_privileges_raw_ptr/privileges_raw_ptr_fortran', []],
    ['tutorial/fortran/06_partitioning/partitioning_fortran', []],
    ['tutorial/fortran/07_partitioning_fortran_task/partitioning_fortran_task_fortran', []],
    ['tutorial/fortran/08_multiple_partitions/multiple_partitions_fortran', []],
    ['tutorial/fortran/09_region_2d/region_2d_fortran', []],
    ['tutorial/fortran/10_attach_array/attach_array_fortran', []],
]

if platform.system() != 'Darwin':
    legion_cxx_tests += [
        # FIXME: Fails non-deterministically on Mac OS: https://github.com/StanfordLegion/legion/issues/213
        ['test/attach_file_mini/attach_file_mini', []],
    ]

# Tests that require a networking layer (REALM_NETWORKS).
legion_network_cxx_tests = [
    # Examples
    ['examples/mpi_interop/mpi_interop', []],
]

legion_openmp_cxx_tests = [
    # Examples
    ['examples/omp_saxpy/omp_saxpy', ['-ll:ocpu', '1']],
]

legion_kokkos_cxx_tests = [
    # Examples
    ['examples/kokkos_saxpy/kokkos_saxpy', []],
]

# Python-binding tests; all run through the legion_python interpreter with
# one Python processor and no regular CPU processors.
legion_python_cxx_tests = [
    # Bindings
    ['bindings/python/legion_python', ['examples/domain.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/domain_point.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/domain_transform.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/future.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/hello.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/import.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/index_launch.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/ispace.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/method.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/must_epoch.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/partition.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/partition_by_field.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/partition_by_image.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/partition_by_image_range.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/partition_by_preimage.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/partition_by_preimage_range.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/partition_by_restriction.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/reduction.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/region.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/region_fields.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/return_region.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/single_launch.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/struct.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/trace.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/tunable.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['examples/types.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['tests/fail/privileges.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['tests/pass/copy.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['tests/pass/empty_region.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['tests/pass/print_once.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['tests/pass/privileges.py', '-ll:py', '1', '-ll:cpu', '0']],
    ['bindings/python/legion_python', ['tests/pass/no_access.py', '-ll:py', '1', '-ll:cpu', '0']],

    # Examples
    ['examples/python_interop/python_interop', ['-ll:py', '1']],

    # Tests
    ['test/python_bindings/python_bindings', ['-ll:py', '1', '-ll:cpu', '0']],
]

legion_hdf_cxx_tests = [
    # Tests
    ['test/hdf_attach_subregion_parallel/hdf_attach_subregion_parallel', ['-ll:cpu', '4']],
]

if platform.system() != 'Darwin':
    legion_hdf_cxx_tests += [
        # FIXME: Fails non-deterministically on Mac OS: https://github.com/StanfordLegion/legion/issues/213
        ['examples/attach_file/attach_file', ['-h', 'data.h5', '-d', '/path/to/data']],
    ]
def get_legion_cxx_perf_tests(nodes, cores_per_node):
    """Returns the C++ performance benchmarks for the given machine shape.

    Each entry is [path_relative_to_root, command_line_flags].
    """
    total_pieces = str(nodes * cores_per_node)
    per_node_cpus = str(cores_per_node)
    # Circuit: Heavy Compute
    heavy_circuit = ['examples/circuit/circuit',
                     ['-l', '10', '-p', total_pieces, '-npp', '2500', '-wpp', '10000',
                      '-ll:csize', '8192', '-ll:cpu', per_node_cpus]]
    # Circuit: Light Compute
    light_circuit = ['examples/circuit/circuit',
                     ['-l', '10', '-p', '100', '-npp', '2', '-wpp', '4', '-ll:cpu', '2']]
    return [heavy_circuit, light_circuit]
def get_regent_perf_tests(nodes, cores_per_node):
    """Returns the Regent performance benchmarks for the given machine shape.

    Each entry is [path_relative_to_root, command_line_flags].
    """
    total_pieces = str(nodes * cores_per_node)
    per_node_cpus = str(cores_per_node)
    # Circuit: Heavy Compute
    heavy_circuit = ['language/examples/circuit_sparse.rg',
                     ['-l', '10', '-p', total_pieces, '-npp', '2500', '-wpp', '10000',
                      '-ll:csize', '8192', '-ll:cpu', per_node_cpus,
                      '-fflow-spmd-shardsize', per_node_cpus]]
    # Circuit: Light Compute
    light_circuit = ['language/examples/circuit_sparse.rg',
                     ['-l', '10', '-p', '100', '-npp', '2', '-wpp', '4', '-ll:cpu', '2',
                      '-fflow-spmd-shardsize', '2']]
    # PENNANT: Heavy Compute
    pennant = ['language/examples/pennant_fast.rg',
               ['pennant.tests/sedovbig3x30/sedovbig.pnt',
                '-seq_init', '0', '-par_init', '1', '-print_ts', '1', '-prune', '5',
                '-npieces', total_pieces, '-numpcx', '1', '-numpcy', total_pieces,
                '-ll:csize', '8192', '-ll:cpu', per_node_cpus,
                '-fflow-spmd-shardsize', per_node_cpus,
                '-fvectorize-unsafe', '1']]
    return [heavy_circuit, light_circuit, pennant]
class TestTimeoutException(Exception):
    """Raised when a test exceeds its wall-clock time limit (see cmd())."""
    pass
def sigalrm_handler(signum, frame):
    """SIGALRM handler: converts the alarm into a TestTimeoutException."""
    raise TestTimeoutException
def cmd(command, env=None, cwd=None, timelimit=None):
    """Runs a command, optionally enforcing a wall-clock time limit.

    Args:
      command: argv list to execute.
      env: environment dict for the child, or None to inherit.
      cwd: working directory for the child, or None.
      timelimit: seconds to allow; on expiry the child is killed and
        TestTimeoutException is raised.

    Returns:
      The child's exit code (0) on success.

    Raises:
      subprocess.CalledProcessError: if the child exits non-zero.
      TestTimeoutException: if the time limit expires.
    """
    print(' '.join(command))
    sys.stdout.flush() # python 2 doesn't have flush option in print
    if timelimit:
        child = subprocess.Popen(command, env=env, cwd=cwd)
        signal.signal(signal.SIGALRM, sigalrm_handler)
        signal.alarm(timelimit)
        try:
            ret = child.wait()
            signal.alarm(0) # disable alarm
            if ret:
                raise subprocess.CalledProcessError(ret, command)
            return ret
        except TestTimeoutException:
            child.kill()
            child.wait()  # reap the killed child so it doesn't linger as a zombie
            raise # re-raise
        finally:
            # Restore default SIGALRM handling on every path; the original code
            # only did this on success, leaking the test handler on timeout or
            # on a non-zero exit.
            signal.alarm(0)
            signal.signal(signal.SIGALRM, signal.SIG_DFL)
    else:
        return subprocess.check_call(command, env=env, cwd=cwd)
def run_test_regent(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
    """Runs the full Regent language test suite via language/travis.py.

    launcher/tmp_dir/bin_dir/thread_count are unused here; travis.py manages
    its own parallelism and launching.
    """
    cmd([sys.executable, os.path.join(root_dir, 'language/travis.py')], env=env)
def run_cxx(tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs each C++ test, cleaning build artifacts as it goes.

    Tests that share a directory are built once; when moving on to a new
    directory, the previous directory's static libraries and binary are
    deleted to keep disk usage down.

    Args:
      tests: list of [test_file, test_flags] pairs (paths relative to root_dir).
      flags: flags passed to every test, before the per-test flags.
      launcher: argv prefix used to launch each test (e.g. mpirun wrapper).
      root_dir: Legion source root.
      bin_dir: if set, run prebuilt binaries from here instead of the source dir.
      env: environment for builds and runs.
      thread_count: make -j parallelism.
      timelimit: per-test time limit in seconds (see cmd()).
    """
    def _cleanup(build_dir, exe_name):
        # Remove static archives and the built test binary.
        cmd(['find', build_dir, '-type', 'f',
             '(', '-name', '*.a', '-o', '-name', exe_name, ')',
             '-exec', 'rm', '-v', '{}', ';'])

    prev_built_dir = None
    prev_exe_name = None
    for test_file, test_flags in tests:
        test_dir = os.path.dirname(os.path.join(root_dir, test_file))
        if bin_dir:
            test_path = os.path.join(bin_dir, os.path.basename(test_file))
        else:
            test_path = os.path.join(root_dir, test_file)
        # build if this is in a new directory
        if test_dir != prev_built_dir:
            # and clean up the previous directory to keep disk usage down
            if prev_built_dir:
                # BUGFIX: clean with the *previous* test's binary name; the old
                # code passed the current test's basename, so the previously
                # built binary was never matched and never removed.
                _cleanup(prev_built_dir, prev_exe_name)
            cmd([make_exe, '-C', test_dir, '-j', str(thread_count)], env=env)
            prev_built_dir = test_dir
        prev_exe_name = os.path.basename(test_file)
        cmd(launcher + [test_path] + flags + test_flags, env=env, cwd=test_dir, timelimit=timelimit)
    if prev_built_dir:
        _cleanup(prev_built_dir, prev_exe_name)
def run_regent(tests, flags, launcher, root_dir, env, thread_count, timelimit):
    """Runs each Regent test from within its own source directory."""
    for test_file, test_flags in tests:
        test_path = os.path.join(root_dir, test_file)
        test_dir = os.path.dirname(test_path)
        full_command = launcher + [test_path] + flags + test_flags
        cmd(full_command, env=env, cwd=test_dir, timelimit=timelimit)
def precompile_regent(tests, flags, launcher, root_dir, env, thread_count):
    """Compiles each Regent test to a standalone executable.

    Returns a list of [exe_path, test_flags] pairs suitable for run_regent().
    """
    exe_tests = []
    for test_file, test_flags in tests:
        test_path = os.path.join(root_dir, test_file)
        test_dir = os.path.dirname(test_path)
        exe = os.path.splitext(test_path)[0] + '.exe'
        # OBJNAME tells the Regent compiler where to write the executable.
        build_env = dict(env)
        build_env['OBJNAME'] = exe
        cmd(launcher + [test_path] + flags + test_flags, env=build_env, cwd=test_dir)
        exe_tests.append([exe, test_flags])
    return exe_tests
def run_test_legion_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs the Legion C++ tutorial/example/test programs."""
    use_gpu = env['USE_CUDA'] == '1'
    use_omp_procs = env['USE_KOKKOS'] == '1' and env['USE_OPENMP'] == '1'
    flags = ['-logfile', 'out_%.log']
    flags += ['-ll:gpu', '1'] if use_gpu else []
    flags += ['-ll:ocpu', '1', '-ll:onuma', '0'] if use_omp_procs else []
    run_cxx(legion_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)
def run_test_legion_network_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs the tests that require a Realm networking layer."""
    use_gpu = env['USE_CUDA'] == '1'
    use_omp_procs = env['USE_KOKKOS'] == '1' and env['USE_OPENMP'] == '1'
    flags = ['-logfile', 'out_%.log']
    flags += ['-ll:gpu', '1'] if use_gpu else []
    flags += ['-ll:ocpu', '1', '-ll:onuma', '0'] if use_omp_procs else []
    run_cxx(legion_network_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)
def run_test_legion_openmp_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs the OpenMP-enabled Legion examples."""
    use_gpu = env['USE_CUDA'] == '1'
    use_omp_procs = env['USE_KOKKOS'] == '1' and env['USE_OPENMP'] == '1'
    flags = ['-logfile', 'out_%.log']
    flags += ['-ll:gpu', '1'] if use_gpu else []
    flags += ['-ll:ocpu', '1', '-ll:onuma', '0'] if use_omp_procs else []
    run_cxx(legion_openmp_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)
def run_test_legion_kokkos_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs the Kokkos-enabled Legion examples."""
    use_gpu = env['USE_CUDA'] == '1'
    use_omp_procs = env['USE_KOKKOS'] == '1' and env['USE_OPENMP'] == '1'
    flags = ['-logfile', 'out_%.log']
    flags += ['-ll:gpu', '1'] if use_gpu else []
    flags += ['-ll:ocpu', '1', '-ll:onuma', '0'] if use_omp_procs else []
    run_cxx(legion_kokkos_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)
def run_test_legion_python_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs the Python-binding tests."""
    # Hack: legion_python currently requires the module name to come first
    flags = []  # ['-logfile', 'out_%.log']
    python_dir = os.path.join(root_dir, 'bindings', 'python')
    # Hack: Fix up the environment so that Python can find all the examples.
    env = dict(env)
    env['PYTHONPATH'] = python_dir
    env['LD_LIBRARY_PATH'] = python_dir
    # Clean up around python because we are going to make shared objects
    # which is not something that anyone else does
    cmd([make_exe, '-C', python_dir, 'clean'], env=env)
    run_cxx(legion_python_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)
    cmd([make_exe, '-C', python_dir, 'clean'], env=env)
def run_test_legion_hdf_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs the HDF5-enabled Legion tests."""
    use_gpu = env['USE_CUDA'] == '1'
    use_omp_procs = env['USE_KOKKOS'] == '1' and env['USE_OPENMP'] == '1'
    flags = ['-logfile', 'out_%.log']
    flags += ['-ll:gpu', '1'] if use_gpu else []
    flags += ['-ll:ocpu', '1', '-ll:onuma', '0'] if use_omp_procs else []
    run_cxx(legion_hdf_cxx_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)
def run_test_legion_fortran(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs the Fortran-binding tutorials."""
    use_gpu = env['USE_CUDA'] == '1'
    flags = ['-logfile', 'out_%.log']
    flags += ['-ll:gpu', '1'] if use_gpu else []
    run_cxx(legion_fortran_tests, flags, launcher, root_dir, bin_dir, env, thread_count, timelimit)
def run_test_fuzzer(launcher, root_dir, tmp_dir, bin_dir, env, thread_count):
    """Clones and runs the Legion fuzz tester."""
    env = dict(env)
    env['WARN_AS_ERROR'] = '0'
    fuzz_dir = os.path.join(tmp_dir, 'fuzz-tester')
    cmd(['git', 'clone', 'https://github.com/StanfordLegion/fuzz-tester', fuzz_dir])
    # TODO; Merge deppart branch into master after this makes it to stable Legion branch
    cmd(['git', 'checkout', 'deppart'], cwd=fuzz_dir)
    cmd(['python', 'main.py'], env=env, cwd=fuzz_dir)
def run_test_realm(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs the Realm runtime unit tests in release mode."""
    test_dir = os.path.join(root_dir, 'test/realm')
    base = [make_exe, '-C', test_dir, 'DEBUG=0']
    cmd(base + ['clean'], env=env)
    cmd(base + ['-j', str(thread_count), 'build'], env=env)
    cmd(base + ['run_all'], env=env, timelimit=timelimit)
def run_test_external(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs a suite of external (out-of-tree) Legion applications.

    Each section below clones a third-party repository into tmp_dir, builds
    it against this Legion checkout, and (for most of them) runs it with a
    per-command time limit. Sections run in order; a failure in one aborts
    the rest.
    """
    flags = ['-logfile', 'out_%.log']

    # Realm perf test (move back to perf test when integrated with perf.py)
    perf_dir = os.path.join(root_dir, 'test/performance/realm')
    cmd([make_exe, '-C', perf_dir, 'DEBUG=0', 'clean_all'], env=env)
    cmd([make_exe, '-C', perf_dir, 'DEBUG=0', 'build_all'], env=env)
    cmd([make_exe, '-C', perf_dir, 'DEBUG=0', 'RUNMODE=short', 'run_all'], env=env, timelimit=timelimit)

    # Fast Direct Solver
    # Contact: Chao Chen <cchen10@stanford.edu>
    solver_dir = os.path.join(tmp_dir, 'fastSolver2')
    cmd(['git', 'clone', 'https://github.com/Charles-Chao-Chen/fastSolver2.git', solver_dir])
    # cmd(['git', 'checkout', '4c7a59de63dd46a0abcc7f296fa3b0f511e5e6d2', ], cwd=solver_dir)
    solver = [[os.path.join(solver_dir, 'spmd_driver/solver'),
        ['-machine', '1', '-core', '8', '-mtxlvl', '6', '-ll:cpu', '8', '-ll:csize', '1024']]]
    run_cxx(solver, flags, launcher, root_dir, None, env, thread_count, timelimit)

    # Parallel Research Kernels: Stencil
    # Contact: Wonchan Lee <wonchan@cs.stanford.edu>
    prk_dir = os.path.join(tmp_dir, 'prk')
    cmd(['git', 'clone', 'https://github.com/magnatelee/PRK.git', prk_dir])
    # This uses a custom Makefile that requires additional
    # configuration. Rather than go to that trouble it's easier to
    # just use a copy of the standard Makefile template.
    stencil_dir = os.path.join(prk_dir, 'LEGION', 'Stencil')
    stencil_env = dict(list(env.items()) + [
        ('OUTFILE', 'stencil'),
        ('GEN_SRC', 'stencil.cc'),
        ('CC_FLAGS', (env['CC_FLAGS'] if 'CC_FLAGS' in env else '') +
         ' -DRADIUS=2 -DRESTRICT_KEYWORD -DDISABLE_BARRIER_MIGRATION'),
    ])
    makefile = os.path.join(root_dir, 'apps/Makefile.template')
    cmd([make_exe, '-f', makefile, '-C', stencil_dir, '-j', str(thread_count)], env=stencil_env)
    stencil = os.path.join(stencil_dir, 'stencil')
    cmd([stencil, '4', '10', '1000'], timelimit=timelimit)

    # SNAP
    # Contact: Mike Bauer <mbauer@nvidia.com>
    snap_dir = os.path.join(tmp_dir, 'snap')
    cmd(['git', 'clone', 'https://github.com/StanfordLegion/Legion-SNAP.git', snap_dir])
    # This can't handle flags before application arguments, so place
    # them after.
    snap = [[os.path.join(snap_dir, 'src/snap'),
             [os.path.join(snap_dir, 'input/mms.in')] + flags]]
    run_cxx(snap, [], launcher, root_dir, None, env, thread_count, timelimit)

    # Soleil-X
    # Contact: Manolis Papadakis <mpapadak@stanford.edu>
    soleil_dir = os.path.join(tmp_dir, 'soleil-x')
    cmd(['git', 'clone', 'https://github.com/stanfordhpccenter/soleil-x.git', soleil_dir])
    soleil_env = dict(list(env.items()) + [
        ('LEGION_DIR', root_dir),
        ('SOLEIL_DIR', soleil_dir),
        ('CC', 'gcc'),
    ])
    cmd([make_exe, '-C', os.path.join(soleil_dir, 'src')], env=soleil_env)
    # FIXME: Actually run it

    # TaskAMR
    # Contact: Jonathan Graham <jgraham@lanl.gov>
    task_amr_dir = os.path.join(tmp_dir, 'task_amr')
    cmd(['git', 'clone', 'https://github.com/lanl/TaskAMR.git', task_amr_dir])
    task_amr_env = dict(list(env.items()) + [
        ('LEGION_ROOT', root_dir),
    ])
    # Build only; like Soleil-X above, this app is not run.
    cmd([make_exe, '-C', os.path.join(task_amr_dir)], env=task_amr_env)

    # Barnes-Hut
    # Contact: Haithem Turki <turki.haithem@gmail.com>
    barnes_hut_dir = os.path.join(tmp_dir, 'barnes_hut')
    cmd(['git', 'clone', 'https://github.com/StanfordLegion/barnes-hut.git', barnes_hut_dir])
    regent_path = os.path.join(root_dir, 'language', 'regent.py')
    # First convert the CSV input to HDF5, then run the simulation on it.
    cmd([sys.executable, regent_path, 'hdf5_converter.rg',
         '-i', 'input/bodies-16384-blitz.csv',
         '-o', 'bodies-16384-blitz.h5',
         '-n', '16384'],
        cwd=barnes_hut_dir,
        env=env,
        timelimit=timelimit)
    cmd([sys.executable, regent_path, 'barnes_hut.rg',
         '-i', 'bodies-16384-blitz.h5',
         '-n', '16384'],
        cwd=barnes_hut_dir,
        env=env,
        timelimit=timelimit)
def run_test_private(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Builds and runs applications hosted in private repositories.

    Requires SSH credentials for the git@github.com / git@gitlab.com clones
    below; intended for internal CI runners only.
    """
    # NOTE(review): flags is assigned but never used in this function.
    flags = ['-logfile', 'out_%.log']

    # MiniAero
    # Contact: Wonchan Lee <wonchan@cs.stanford.edu>
    miniaero_dir = os.path.join(tmp_dir, 'miniaero-spmd')
    cmd(['git', 'clone', '-b', 'spmd_flattened_superblocks',
         'git@github.com:magnatelee/miniaero-spmd.git', miniaero_dir])
    cmd([make_exe, '-C', miniaero_dir, '-j', str(thread_count)], env=env,
        cwd=miniaero_dir)
    for test in ['3D_Sod', '3D_Sod_2nd_Order'
                 # These tests take a long time so skip them by default.
                 # , 'FlatPlate', 'Ramp'
                ]:
        test_dir = os.path.join(miniaero_dir, 'tests', test)
        cmd([os.path.join(test_dir, 'test.sh')], env=env, cwd=test_dir, timelimit=timelimit)

    # PENNANT
    # Contact: Galen Shipman <gshipman@lanl.gov>
    pennant_dir = os.path.join(tmp_dir, 'pennant')
    cmd(['git', 'clone', '-b', 'spmdv2',
         'git@github.com:gshipman/pennant-legion.git', pennant_dir])
    # This uses a custom Makefile that requires additional
    # configuration. Rather than go to that trouble it's easier to
    # just use a copy of the standard Makefile template.
    pennant_env = dict(list(env.items()) + [
        ('OUTFILE', 'pennant'),
        ('GEN_SRC', ' '.join(glob.glob(os.path.join(pennant_dir, 'src/*.cc')))),
        ('CC_FLAGS', (env['CC_FLAGS'] if 'CC_FLAGS' in env else '') +
         ' -std=c++11 -Wno-sign-compare -Wno-unknown-pragmas -Wno-unused-variable' +
         ' -D__STDC_FORMAT_MACROS -DDISABLE_BARRIER_MIGRATION'),
        ('WARN_AS_ERROR', '0'),
    ])
    makefile = os.path.join(root_dir, 'apps/Makefile.template')
    # Previous build uses -DASSUME_UNALLOCABLE. Clean first to get a fresh environment.
    cmd([make_exe, '-f', makefile, '-C', pennant_dir, 'clean'], env=pennant_env)
    cmd([make_exe, '-f', makefile, '-C', pennant_dir, '-j', str(thread_count)], env=pennant_env)
    pennant = os.path.join(pennant_dir, 'pennant')
    cmd([pennant, str(app_cores), 'test/sedovsmall/sedovsmall.pnt', '-ll:cpu', str(app_cores)],
        cwd=pennant_dir,
        timelimit=timelimit)

    # HTR
    # Contact: Mario Di Renzo <direnzo.mario1@gmail.com>
    htr_dir = os.path.join(tmp_dir, 'htr')
    cmd(['git', 'clone', '-b', 'Develop', 'git@gitlab.com:mario.direnzo/Prometeo.git', htr_dir])
    htr_env = dict(list(env.items()) + [
        ('LEGION_DIR', root_dir),
        ('LD_LIBRARY_PATH', os.path.join(root_dir, 'bindings', 'regent')),
        ('HTR_DIR', htr_dir),
        ('CC', 'gcc'),
    ])
    # HTR drives its own build and run through this test script.
    cmd(['python3', os.path.join(htr_dir, 'unitTests', 'testAll.py')], env=htr_env)
def run_test_ctest(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit):
    """Runs the CMake-generated test suite via ctest in the build directory."""
    build_dir = os.path.join(tmp_dir, 'build')
    ctest_args = ['ctest', '-j', str(thread_count), '--output-on-failure']
    if timelimit:
        ctest_args += ['--timeout', str(timelimit)]
    cmd(ctest_args, env=env, cwd=build_dir)
def hostname():
    """Returns this machine's hostname as a (unicode) string.

    BUGFIX: decode the subprocess output — on Python 3 check_output returns
    bytes, and the result is fed to json.dumps in the perf metadata, which
    rejects bytes. This mirrors the decoding done for the other
    check_output calls at the top of this file.
    """
    return subprocess.check_output(['hostname']).decode('utf-8').strip()
def git_commit_id(repo_dir):
    """Returns the HEAD commit hash of the git repository at repo_dir.

    BUGFIX: decode the subprocess output so the result is a str on Python 3
    (it is later serialized with json.dumps, which rejects bytes).
    """
    return subprocess.check_output(
        ['git', 'rev-parse', 'HEAD'], cwd=repo_dir).decode('utf-8').strip()
def git_branch_name(repo_dir):
    """Returns the current git branch name at repo_dir, or None if detached.

    BUGFIX: decode the subprocess output so the result is a str on Python 3
    (it is later serialized with json.dumps, which rejects bytes).
    """
    proc = subprocess.Popen(
        ['git', 'symbolic-ref', '--short', 'HEAD'], cwd=repo_dir,
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, errors = proc.communicate()
    if proc.returncode == 0:
        return output.decode('utf-8').strip()
    # Detached HEAD (no symbolic ref): no branch name available.
    return None
def run_test_perf_one_configuration(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, nodes, timelimit):
    """Runs the C++ and Regent performance benchmarks for one node count.

    Results are captured through perf.py (configured via PERF_* environment
    variables) and pushed to the StanfordLegion/perf-data repository, then
    the summary charts are regenerated.

    Args:
      launcher: argv prefix used to launch each benchmark.
      root_dir: Legion source root.
      tmp_dir: scratch directory (unused here).
      bin_dir: if set, run prebuilt C++ binaries from here.
      env: base environment; augmented with PERF_* settings below.
      thread_count: build parallelism.
      nodes: node count for this configuration.
      timelimit: per-benchmark time limit in seconds.
    """
    flags = ['-logfile', 'out_%.log']
    # for backward-compatibility, use app_cores if PERF_CORES_PER_NODE is not specified
    cores_per_node = int(os.environ.get('PERF_CORES_PER_NODE', app_cores))
    legion_cxx_perf_tests = get_legion_cxx_perf_tests(nodes, cores_per_node)
    regent_perf_tests = get_regent_perf_tests(nodes, cores_per_node)

    # Regent needs special flags when in precompile mode
    precompile = os.environ.get('PERF_PRECOMPILE_REGENT') == '1'

    # Performance test configuration:
    metadata = {
        'host': (os.environ['CI_RUNNER_DESCRIPTION']
                 if 'CI_RUNNER_DESCRIPTION' in os.environ else hostname()),
        'nodes': nodes,
        'commit': (os.environ['CI_BUILD_REF'] if 'CI_BUILD_REF' in os.environ
                   else git_commit_id(root_dir)),
        'branch': (os.environ['CI_BUILD_REF_NAME'] if 'CI_BUILD_REF_NAME' in os.environ
                   else git_branch_name(root_dir)),
    }
    cxx_measurements = {
        # Hack: Use the command name as the benchmark name.
        'benchmark': {
            'type': 'argv',
            'index': 0,
            'filter': 'basename',
        },
        # Capture command line arguments following flags.
        'argv': {
            'type': 'argv',
            'start': 1 + len(flags),
        },
        # Record running time in seconds.
        'time_seconds': {
            'type': 'regex',
            'pattern': r'^ELAPSED TIME\s*=\s*(.*) s$',
            'multiline': True,
        }
    }
    regent_measurements = {
        # Hack: Use the command name as the benchmark name.
        'benchmark': {
            'type': 'argv',
            'index': 1,
            'filter': 'basename',
        },
        # Capture command line arguments following flags.
        'argv': {
            'type': 'argv',
            'start': 2,# + len(flags), # FIXME: Skipping flags, see below.
        },
        # Record running time in seconds.
        'time_seconds': {
            'type': 'command',
            'args': [
                os.path.join(root_dir, 'language/scripts/summarize.py'),
                '--machine-readable', '-',
            ],
        }
    }
    env = dict(list(env.items()) + [
        ('PERF_OWNER', 'StanfordLegion'),
        ('PERF_REPOSITORY', 'perf-data'),
        ('PERF_METADATA', json.dumps(metadata)),
    ])
    cxx_env = dict(list(env.items()) + [
        ('PERF_MEASUREMENTS', json.dumps(cxx_measurements)),
        # Launch through perf.py
        ('PERF_LAUNCHER', ' '.join(launcher)),
        ('LAUNCHER', ''),
    ])
    regent_env = dict(list(env.items()) + [
        ('PERF_MEASUREMENTS', json.dumps(regent_measurements)),
        # Launch through regent.py
        ('PERF_LAUNCHER', ' '.join(launcher) if precompile else ''),
        ('LAUNCHER', '' if precompile else ' '.join(launcher)),
    ])

    # Build Regent first to avoid recompiling later.
    cmd([sys.executable, os.path.join(root_dir, 'language/travis.py'), '--install-only'], env=env)

    # Run Legion C++ performance tests.
    runner = os.path.join(root_dir, 'perf.py')
    run_cxx(legion_cxx_perf_tests, flags, [runner], root_dir, bin_dir, cxx_env, thread_count, timelimit)

    # Run Regent performance tests.
    regent_path = os.path.join(root_dir, 'language/regent.py')
    if precompile:
        # Precompile executables.
        build_env = dict(list(env.items()) + [
            ('SAVEOBJ', '1'),
            ('STANDALONE', '1'),
            ('LAUNCHER', ''),
        ])
        exe_tests = precompile_regent(regent_perf_tests, [], [regent_path], root_dir, build_env, thread_count)
        # FIXME: PENNANT can't handle the -logfile flag coming first, so just skip it.
        # BUGFIX: run_regent requires a timelimit argument; it was omitted
        # here, making this call a TypeError at runtime.
        run_regent(exe_tests, [], [runner], root_dir, regent_env, thread_count, timelimit)
    else:
        # FIXME: PENNANT can't handle the -logfile flag coming first, so just skip it.
        # BUGFIX: same missing timelimit argument as the precompile branch.
        run_regent(regent_perf_tests, [], [runner, regent_path], root_dir, regent_env, thread_count, timelimit)

    # Render the final charts.
    subprocess.check_call(
        [sys.executable,
         os.path.join(root_dir, 'tools', 'perf_chart.py'),
         'git@github.com:StanfordLegion/perf-data.git',
         'git@github.com:StanfordLegion/perf-data.git'],
        env=env)
def run_test_perf(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, min_nodes, max_nodes, timelimit):
    """Runs the performance suite at each power-of-two node count.

    The launcher entries may contain a '{NODES}' placeholder that is filled
    in with the node count for each configuration.
    """
    nodes = min_nodes
    while nodes <= max_nodes:
        # BUGFIX: format from the original template each iteration. The old
        # code rebound `launcher` to the formatted list, so '{NODES}' was
        # consumed on the first pass and every later node count silently
        # reused the first launcher.
        node_launcher = [w.format(**{'NODES': nodes}) for w in launcher]
        run_test_perf_one_configuration(node_launcher, root_dir, tmp_dir, bin_dir, env, thread_count, nodes, timelimit)
        nodes *= 2
def check_test_legion_cxx(root_dir):
    """Verifies every tutorial/example/test directory appears in a test list.

    Raises an Exception listing any directory under tutorial/, examples/, or
    test/ that is not covered by one of the C++ test lists above.
    """
    print('Checking that tests that SHOULD be tested are ACTUALLY tested...')
    print()

    # These are the directories we SHOULD have coverage for.
    should_dirs = ['tutorial', 'examples', 'test']
    should_tests = []
    for parent in should_dirs:
        parent_path = os.path.join(root_dir, parent)
        for entry in os.listdir(parent_path):
            if os.path.isdir(os.path.join(parent_path, entry)):
                should_tests.append(os.path.join(parent, entry))
    assert len(should_tests) > 0

    # These are the tests we ACTUALLY have coverage for.
    all_tests = (legion_cxx_tests + legion_network_cxx_tests +
                 legion_openmp_cxx_tests + legion_python_cxx_tests +
                 legion_hdf_cxx_tests + legion_kokkos_cxx_tests +
                 legion_fortran_tests)
    actual_tests = set(os.path.dirname(test_file) for test_file, _ in all_tests)
    actual_tests.add('test/realm') # We test Realm separately.
    actual_tests.add('test/performance') # We test performance separately.

    # Check that all tests that SHOULD be covered are ACTUALLY covered.
    not_tests = [t for t in should_tests if t not in actual_tests]
    if len(not_tests) > 0:
        print('The following tests are NOT currently being tested:')
        print()
        for missing in not_tests:
            print('  %s' % missing)
        print()
        raise Exception('There are tests that are NOT in the test suite')
def build_cmake(root_dir, tmp_dir, env, thread_count,
                test_regent, test_legion_cxx,
                test_external, test_perf, test_ctest):
    """Configure, build and install Legion with CMake.

    Creates build/install directories under tmp_dir, translates the
    '0'/'1' environment flags into CMake ON/OFF options, then runs
    cmake, make and make install.  Returns the build's bin directory.
    """
    build_dir = os.path.join(tmp_dir, 'build')
    install_dir = os.path.join(tmp_dir, 'install')
    os.mkdir(build_dir)
    os.mkdir(install_dir)

    def onoff(flag):
        # Environment flags are '1'/'0' strings; CMake wants ON/OFF.
        return 'ON' if flag == '1' else 'OFF'

    cmdline = ['cmake', '-DCMAKE_INSTALL_PREFIX=%s' % install_dir]
    cmdline.append('-DCMAKE_BUILD_TYPE=%s' %
                   ('Debug' if env['DEBUG'] == '1' else 'Release'))
    cmdline.append('-DLegion_BUILD_WARN_AS_ERROR=%s' %
                   onoff(env.get('WARN_AS_ERROR', '0')))
    cmdline.append('-DLegion_MAX_DIM=%s' % env['MAX_DIM'])
    cmdline.append('-DLegion_NETWORKS=%s' % env['REALM_NETWORKS'])
    cmdline.append('-DLegion_USE_CUDA=%s' % onoff(env['USE_CUDA']))
    if 'GPU_ARCH' in env:
        cmdline.append('-DLegion_CUDA_ARCH=%s' % env['GPU_ARCH'])
    cmdline.append('-DLegion_USE_OpenMP=%s' % onoff(env['USE_OPENMP']))
    cmdline.append('-DLegion_USE_Kokkos=%s' % onoff(env['USE_KOKKOS']))
    cmdline.append('-DLegion_USE_Python=%s' % onoff(env['USE_PYTHON']))
    cmdline.append('-DLegion_USE_LLVM=%s' % onoff(env['USE_LLVM']))
    cmdline.append('-DLegion_USE_HDF5=%s' % onoff(env['USE_HDF']))
    cmdline.append('-DLegion_USE_Fortran=%s' % onoff(env['LEGION_USE_FORTRAN']))
    cmdline.append('-DLegion_SPY=%s' % onoff(env['USE_SPY']))
    cmdline.append('-DLegion_BOUNDS_CHECKS=%s' % onoff(env['BOUNDS_CHECKS']))
    cmdline.append('-DLegion_PRIVILEGE_CHECKS=%s' % onoff(env['PRIVILEGE_CHECKS']))
    if 'LEGION_WARNINGS_FATAL' in env:
        cmdline.append('-DLegion_WARNINGS_FATAL=%s' %
                       onoff(env['LEGION_WARNINGS_FATAL']))
    if test_ctest:
        cmdline.append('-DLegion_ENABLE_TESTING=ON')
        if 'LAUNCHER' in env:
            cmdline.append('-DLegion_TEST_LAUNCHER=%s' % env['LAUNCHER'])
    else:
        cmdline.append('-DLegion_ENABLE_TESTING=OFF')
    if 'CC_FLAGS' in env:
        cmdline.append('-DCMAKE_CXX_FLAGS=%s' % env['CC_FLAGS'])
    if test_regent or test_legion_cxx or test_external or test_perf or test_ctest:
        cmdline.append('-DLegion_BUILD_ALL=ON')
    # Several different conditions force the use of shared libraries.
    needs_shared = (test_regent or test_external or
                    env['USE_PYTHON'] == '1' or env['SHARED_OBJECTS'] == '1')
    cmdline.append('-DBUILD_SHARED_LIBS=%s' % ('ON' if needs_shared else 'OFF'))
    # The last argument to cmake is the root of the source tree.
    cmdline.append(root_dir)
    cmd(cmdline, env=env, cwd=build_dir)
    cmd([make_exe, '-C', build_dir, '-j', str(thread_count)], env=env)
    cmd([make_exe, '-C', build_dir, 'install'], env=env)
    return os.path.join(build_dir, 'bin')
def build_regent(root_dir, env):
    """Build and install the Regent compiler without running its tests."""
    cmd([os.path.join(root_dir, 'language/travis.py'), '--install-only'], env=env)
def clean_cxx(tests, root_dir, env, thread_count):
    """Run ``make clean`` in the directory of every listed test.

    :param tests: iterable of (test_file, test_flags) pairs.
    """
    # Copy the environment rather than mutating the caller's dict, and
    # always silence the initial clean pass via MAKEFLAGS.
    env = dict(env, MAKEFLAGS='s')
    for test_file, _flags in tests:
        test_dir = os.path.dirname(os.path.join(root_dir, test_file))
        cmd([make_exe, '-C', test_dir, 'clean'], env=env)
def build_make_clean(root_dir, env, thread_count, test_legion_cxx, test_perf,
                     test_external, test_private):
    """Clean the GNU Make build trees the requested tests will rebuild.

    External and private configurations are built separately, but their
    trees still require cleaning here first.
    """
    needs_clean = any([test_legion_cxx, test_perf, test_external, test_private])
    if needs_clean:
        clean_cxx(legion_cxx_tests, root_dir, env, thread_count)
    fortran_enabled = env['LEGION_USE_FORTRAN'] == '1'
    if test_legion_cxx and fortran_enabled:
        clean_cxx(legion_fortran_tests, root_dir, env, thread_count)
def option_enabled(option, options, default, envprefix='', envname=None):
    """Decide whether a named option is enabled.

    An explicit ``options`` list takes precedence; otherwise the
    environment variable ``envname`` (or ``envprefix`` plus the
    upper-cased option name) is consulted; otherwise ``default`` wins.
    """
    if options is not None:
        return option in options
    env_var = envname if envname is not None else '%s%s' % (envprefix, option.upper())
    if env_var in os.environ:
        return os.environ[env_var] == '1'
    return default
class Stage(object):
    """Context manager that prints banners around a named test stage and
    reports the elapsed time (and exception type, if any) on exit."""
    __slots__ = ['name', 'begin_time']

    def __init__(self, name):
        self.name = name

    def _banner(self, body_lines):
        # Shared banner layout for entry and exit messages.
        print()
        print('#' * 60)
        for line in body_lines:
            print(line)
        print('#' * 60)
        print()
        sys.stdout.flush()

    def __enter__(self):
        self.begin_time = datetime.datetime.now()
        self._banner(['### Entering Stage: %s' % self.name])

    def __exit__(self, exc_type, exc_val, exc_tb):
        elapsed = datetime.datetime.now() - self.begin_time
        self._banner(['### Exiting Stage: %s' % self.name,
                      '### * Exception Type: %s' % exc_type,
                      '### * Elapsed Time: %s' % elapsed])
def report_mode(debug, max_dim, launcher,
                test_regent, test_legion_cxx, test_fuzzer, test_realm,
                test_external, test_private, test_perf, test_ctest, networks,
                use_cuda, use_openmp, use_kokkos, use_python, use_llvm,
                use_hdf, use_fortran, use_spy, use_prof,
                use_bounds_checks, use_privilege_checks, use_shared_objects,
                use_gcov, use_cmake, use_rdir):
    """Print a banner describing the configuration of this test run."""
    # Assemble the whole report first, then emit it with a single print
    # (joined lines produce byte-identical output to line-by-line prints).
    lines = [
        '',
        '#' * 60,
        '### Test Suite Configuration',
        '###',
        '### Python:',
    ]
    lines.extend('### ' + line for line in sys.version.split('\n'))
    lines.extend([
        '###',
        '### Debug: %s' % debug,
        '### Launcher: %s' % launcher,
        '###',
        '### Running Tests:',
        '### * Regent: %s' % test_regent,
        '### * Legion C++: %s' % test_legion_cxx,
        '### * Fuzzer: %s' % test_fuzzer,
        '### * Realm: %s' % test_realm,
        '### * External: %s' % test_external,
        '### * Private: %s' % test_private,
        '### * Perf: %s' % test_perf,
        '### * CTest: %s' % test_ctest,
        '###',
        '### Build Flags:',
        '### * Networks: %s' % networks,
        '### * CUDA: %s' % use_cuda,
        '### * OpenMP: %s' % use_openmp,
        '### * Kokkos: %s' % use_kokkos,
        '### * Python: %s' % use_python,
        '### * LLVM: %s' % use_llvm,
        '### * HDF5: %s' % use_hdf,
        '### * Fortran: %s' % use_fortran,
        '### * Spy: %s' % use_spy,
        '### * Prof: %s' % use_prof,
        '### * Bounds: %s' % use_bounds_checks,
        '### * Privilege: %s' % use_privilege_checks,
        '### * Shared Obj: %s' % use_shared_objects,
        '### * Gcov: %s' % use_gcov,
        '### * CMake: %s' % use_cmake,
        '### * RDIR: %s' % use_rdir,
        '### * Max DIM: %s' % max_dim,
        '#' * 60,
        '',
    ])
    print('\n'.join(lines))
    sys.stdout.flush()
def run_tests(test_modules=None,
              debug=True,
              max_dim=3,
              use_features=None,
              networks='',
              launcher=None,
              thread_count=None,
              root_dir=None,
              check_ownership=False,
              keep_tmp_dir=False,
              timelimit=None,
              verbose=False):
    """Top-level test-suite driver.

    Resolves which test modules and build features are enabled (explicit
    arguments take precedence over TEST_*/USE_* environment variables),
    validates the configuration, builds Legion in a temporary directory,
    and runs each enabled test module inside a printed Stage banner.

    :param test_modules: explicit list of modules to run, or None to
        consult TEST_* environment variables.
    :param use_features: explicit list of build features, or None to
        consult USE_* environment variables.
    :param networks: Realm network backend string; requires `launcher`.
    :param check_ownership: if True, only verify test-suite coverage and
        return without building or running anything.
    :param keep_tmp_dir: if True, leave the temporary build directory
        behind for inspection instead of deleting it.
    :raises Exception: on inconsistent configuration (e.g. Kokkos
        without CMake, perf tests in debug mode).
    """
    if thread_count is None:
        thread_count = multiprocessing.cpu_count()
    if root_dir is None:
        root_dir = os.path.dirname(os.path.realpath(__file__))
    if timelimit is None:
        if 'TIMELIMIT' in os.environ:
            timelimit = int(os.environ['TIMELIMIT'])
    # Determine which test modules to run.
    def module_enabled(module, default=True, prefix='TEST_', **kwargs):
        return option_enabled(module, test_modules, default,
                              envprefix=prefix, **kwargs)
    test_regent = module_enabled('regent')
    test_legion_cxx = module_enabled('legion_cxx')
    test_fuzzer = module_enabled('fuzzer', False)
    test_realm = module_enabled('realm', not debug)
    test_external = module_enabled('external', False)
    test_private = module_enabled('private', False)
    test_perf = module_enabled('perf', False)
    test_ctest = module_enabled('ctest', False)
    # Determine which features to build with.
    def feature_enabled(feature, default=True, prefix='USE_', **kwargs):
        return option_enabled(feature, use_features, default,
                              envprefix=prefix, **kwargs)
    use_cuda = feature_enabled('cuda', False)
    use_openmp = feature_enabled('openmp', False)
    use_kokkos = feature_enabled('kokkos', False)
    use_python = feature_enabled('python', False)
    use_llvm = feature_enabled('llvm', False)
    use_hdf = feature_enabled('hdf', False)
    use_fortran = feature_enabled('fortran', False, prefix='LEGION_USE_')
    use_spy = feature_enabled('spy', False)
    use_prof = feature_enabled('prof', False)
    use_bounds_checks = feature_enabled('bounds', False,
                                        envname='BOUNDS_CHECKS')
    use_privilege_checks = feature_enabled('privilege', False,
                                           envname='PRIVILEGE_CHECKS')
    use_gcov = feature_enabled('gcov', False)
    use_cmake = feature_enabled('cmake', False)
    use_rdir = feature_enabled('rdir', True)
    use_shared_objects = feature_enabled('shared', False,
                                         envname='SHARED_OBJECTS')
    if use_kokkos and not use_cmake:
        raise Exception('Kokkos support requires use of CMake')
    # Determine parameters for performance tests.
    # NOTE: min_nodes/max_nodes are only bound when test_perf is set;
    # the run_test_perf call below relies on that.
    if test_perf:
        if 'PERF_MIN_NODES' not in os.environ:
            raise Exception('Performance tests requested but PERF_MIN_NODES is not set')
        min_nodes = int(os.environ['PERF_MIN_NODES'])
        if 'PERF_MAX_NODES' not in os.environ:
            raise Exception('Performance tests requested but PERF_MAX_NODES is not set')
        max_nodes = int(os.environ['PERF_MAX_NODES'])
    if test_perf and debug:
        raise Exception('Performance tests requested but DEBUG is enabled')
    if test_ctest and not use_cmake:
        raise Exception('CTest cannot be used without CMake')
    if networks and launcher is None:
        raise Exception('Network(s) is enabled but launcher is not set (use --launcher or LAUNCHER)')
    launcher = launcher.split() if launcher is not None else []
    gcov_flags = ' -ftest-coverage -fprofile-arcs'
    if check_ownership:
        check_test_legion_cxx(root_dir)
        return
    report_mode(debug, max_dim, launcher,
                test_regent, test_legion_cxx, test_fuzzer, test_realm,
                test_external, test_private, test_perf, test_ctest,
                networks,
                use_cuda, use_openmp, use_kokkos, use_python, use_llvm,
                use_hdf, use_fortran, use_spy, use_prof,
                use_bounds_checks, use_privilege_checks, use_shared_objects,
                use_gcov, use_cmake, use_rdir)
    tmp_dir = tempfile.mkdtemp(dir=root_dir)
    if verbose:
        print('Using build directory: %s' % tmp_dir)
        print()
    # Normalize the test environment.
    env = dict(list(os.environ.items()) + [
        ('DEBUG', '1' if debug else '0'),
        ('LAUNCHER', ' '.join(launcher)),
        ('REALM_NETWORKS', networks),
        ('USE_CUDA', '1' if use_cuda else '0'),
        ('TEST_CUDA', '1' if use_cuda else '0'),
        ('USE_OPENMP', '1' if use_openmp else '0'),
        ('TEST_OPENMP', '1' if use_openmp else '0'),
        ('USE_KOKKOS', '1' if use_kokkos else '0'),
        ('TEST_KOKKOS', '1' if use_kokkos else '0'),
        ('USE_PYTHON', '1' if use_python else '0'),
        ('TEST_PYTHON', '1' if use_python else '0'),
        ('USE_LLVM', '1' if use_llvm else '0'),
        ('USE_HDF', '1' if use_hdf else '0'),
        ('TEST_HDF', '1' if use_hdf else '0'),
        ('LEGION_USE_FORTRAN', '1' if use_fortran else '0'),
        ('TEST_FORTRAN', '1' if use_fortran else '0'),
        ('USE_SPY', '1' if use_spy else '0'),
        ('TEST_SPY', '1' if use_spy else '0'),
        ('USE_PROF', '1' if use_prof else '0'),
        ('TEST_PROF', '1' if use_prof else '0'),
        ('BOUNDS_CHECKS', '1' if use_bounds_checks else '0'),
        ('PRIVILEGE_CHECKS', '1' if use_privilege_checks else '0'),
        ('SHARED_OBJECTS', '1' if use_shared_objects else '0'),
        ('TEST_GCOV', '1' if use_gcov else '0'),
        ('USE_RDIR', '1' if use_rdir else '0'),
        ('MAX_DIM', str(max_dim)),
        ('LG_RT_DIR', os.path.join(root_dir, 'runtime')),
        ('DEFINE_HEADERS_DIR', os.path.join(root_dir, 'runtime')),
        ('CMAKE_BUILD_DIR', os.path.join(tmp_dir, 'build'))] + (
        # Gcov doesn't get a USE_GCOV flag, but instead stuff the GCC
        # options for Gcov on to the compile and link flags.
        [('CC_FLAGS', (os.environ['CC_FLAGS'] + gcov_flags
                       if 'CC_FLAGS' in os.environ else gcov_flags)),
         ('LD_FLAGS', (os.environ['LD_FLAGS'] + gcov_flags
                       if 'LD_FLAGS' in os.environ else gcov_flags)),
        ] if use_gcov else []))
    try:
        # Build tests.
        with Stage('build'):
            if use_cmake:
                bin_dir = build_cmake(
                    root_dir, tmp_dir, env, thread_count,
                    test_regent, test_legion_cxx, test_external,
                    test_perf, test_ctest)
            else:
                # With GNU Make, builds happen inline. But clean here.
                build_make_clean(
                    root_dir, env, thread_count, test_legion_cxx, test_perf,
                    # These configurations also need to be cleaned first.
                    test_external, test_private)
                bin_dir = None
        # Run tests.
        if test_regent:
            with Stage('regent'):
                run_test_regent(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
        if test_legion_cxx:
            with Stage('legion_cxx'):
                run_test_legion_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
                if networks:
                    run_test_legion_network_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
                if use_openmp:
                    run_test_legion_openmp_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
                if use_kokkos:
                    run_test_legion_kokkos_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
                if use_python:
                    run_test_legion_python_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
                if use_hdf:
                    run_test_legion_hdf_cxx(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
                if use_fortran:
                    run_test_legion_fortran(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
        if test_fuzzer:
            with Stage('fuzzer'):
                run_test_fuzzer(launcher, root_dir, tmp_dir, bin_dir, env, thread_count)
        if test_realm:
            with Stage('realm'):
                run_test_realm(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
        if test_external:
            with Stage('external'):
                if not test_regent:
                    build_regent(root_dir, env)
                run_test_external(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
        if test_private:
            with Stage('private'):
                run_test_private(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
        if test_perf:
            with Stage('perf'):
                run_test_perf(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, min_nodes, max_nodes, timelimit)
        if test_ctest:
            with Stage('ctest'):
                run_test_ctest(launcher, root_dir, tmp_dir, bin_dir, env, thread_count, timelimit)
    finally:
        if keep_tmp_dir:
            print('Leaving build directory:')
            print(' %s' % tmp_dir)
        else:
            if verbose:
                print('Removing build directory:')
                print(' %s' % tmp_dir)
            shutil.rmtree(tmp_dir)
class MultipleChoiceList(object):
    """Container that behaves enough like a normal list for
    ArgumentParser's needs, except that its membership test also accepts
    a list of values and checks each one for membership.
    """

    def __init__(self, *args):
        self.list = list(args)

    def __contains__(self, x):
        # A list of candidates is a member only if every candidate is.
        if type(x) is list:
            return all(v in self.list for v in x)
        return x in self.list

    def __iter__(self):
        return iter(self.list)
class ExtendAction(argparse.Action):
    """argparse action that accumulates values across repeated options:
    list values extend the destination, scalars are appended.
    """

    def __init__(self, **kwargs):
        super(ExtendAction, self).__init__(**kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Work on a copy so a shared default list is never mutated.
        existing = getattr(namespace, self.dest, None)
        items = list(existing) if existing else []
        if type(values) is list:
            items = items + values
        else:
            items = items + [values]
        setattr(namespace, self.dest, items)
def driver():
    """Parse command-line arguments and invoke run_tests().

    Every option mirrors an environment variable (TEST_*, USE_*, DEBUG,
    MAX_DIM, REALM_NETWORKS, LAUNCHER) so the suite can be driven either
    from the CLI or from CI configuration.
    """
    parser = argparse.ArgumentParser(
        description = 'Legion test suite')
    # What tests to run:
    parser.add_argument(
        '--test', dest='test_modules', action=ExtendAction,
        choices=MultipleChoiceList('regent', 'legion_cxx', 'fuzzer',
                                   'realm', 'external',
                                   'private', 'perf', 'ctest'),
        type=lambda s: s.split(','),
        default=None,
        help='Test modules to run (also via TEST_*).')
    # Build options:
    parser.add_argument(
        '--debug', dest='debug', action='store_true',
        default=os.environ['DEBUG'] == '1' if 'DEBUG' in os.environ else True,
        help='Build Legion in debug mode (also via DEBUG).')
    parser.add_argument(
        '--no-debug', dest='debug', action='store_false',
        help='Disable debug mode (equivalent to DEBUG=0).')
    parser.add_argument(
        '--max-dim', dest='max_dim', type=int,
        default=int(os.environ['MAX_DIM']) if 'MAX_DIM' in os.environ else 3,
        help='Maximum number of dimensions (also via MAX_DIM).')
    parser.add_argument(
        '--use', dest='use_features', action=ExtendAction,
        choices=MultipleChoiceList('gasnet', 'cuda', 'openmp', 'kokkos',
                                   'python', 'llvm', 'hdf', 'fortran', 'spy', 'prof',
                                   'bounds', 'privilege',
                                   'gcov', 'cmake', 'rdir'),
        type=lambda s: s.split(','),
        default=None,
        help='Build Legion with features (also via USE_*).')
    parser.add_argument(
        '--network', dest='networks', action='store',
        default=os.environ.get('REALM_NETWORKS', 'gasnet1' if os.environ.get('USE_GASNET', '0') == '1' else ''),
        help='Network backend(s) to build with')
    parser.add_argument(
        '--launcher', dest='launcher', action='store',
        default=os.environ['LAUNCHER'] if 'LAUNCHER' in os.environ else None,
        help='Launcher for Legion tests (also via LAUNCHER).')
    parser.add_argument(
        '-C', '--directory', dest='root_dir', metavar='DIR', action='store', required=False,
        help='Legion root directory.')
    parser.add_argument(
        '-j', dest='thread_count', nargs='?', type=int,
        help='Number threads used to compile.')
    parser.add_argument(
        '--check', dest='check_ownership', action='store_true',
        help='Check for tests that are being skipped.')
    parser.add_argument(
        '--keep', dest='keep_tmp_dir', action='store_true',
        help='Keep temporary directory.')
    parser.add_argument(
        '--timelimit', dest='timelimit', type=int,
        help='Maximum time (in seconds) allowed for individual test execution')
    parser.add_argument(
        '-v', '--verbose', dest='verbose', action='store_true',
        help='Print more debugging information.')
    args = parser.parse_args()
    run_tests(**vars(args))
# Script entry point: parse CLI arguments and run the requested tests.
if __name__ == '__main__':
    driver()
| 45.487589 | 166 | 0.616644 |
acde9caae4a97dc1f99d5d47936aaa0ad6fc6bd9 | 45,620 | py | Python | gitee/models/project.py | pygitee/pygitee | 7622314a4dbb08cf2f729b6cdd0a2887b96e394e | [
"MIT"
] | null | null | null | gitee/models/project.py | pygitee/pygitee | 7622314a4dbb08cf2f729b6cdd0a2887b96e394e | [
"MIT"
] | null | null | null | gitee/models/project.py | pygitee/pygitee | 7622314a4dbb08cf2f729b6cdd0a2887b96e394e | [
"MIT"
] | null | null | null | # coding: utf-8
import pprint
import re # noqa: F401
import six
class Project(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'full_name': 'str',
'human_name': 'str',
'url': 'str',
'namespace': 'object',
'path': 'str',
'name': 'str',
'owner': 'str',
'description': 'str',
'private': 'str',
'public': 'str',
'internal': 'str',
'fork': 'str',
'html_url': 'str',
'ssh_url': 'str',
'forks_url': 'str',
'keys_url': 'str',
'collaborators_url': 'str',
'hooks_url': 'str',
'branches_url': 'str',
'tags_url': 'str',
'blobs_url': 'str',
'stargazers_url': 'str',
'contributors_url': 'str',
'commits_url': 'str',
'comments_url': 'str',
'issue_comment_url': 'str',
'issues_url': 'str',
'pulls_url': 'str',
'milestones_url': 'str',
'notifications_url': 'str',
'labels_url': 'str',
'releases_url': 'str',
'recommend': 'str',
'homepage': 'str',
'language': 'str',
'forks_count': 'str',
'stargazers_count': 'str',
'watchers_count': 'str',
'default_branch': 'str',
'open_issues_count': 'int',
'has_issues': 'str',
'has_wiki': 'str',
'issue_comment': 'str',
'can_comment': 'str',
'pull_requests_enabled': 'str',
'has_page': 'str',
'license': 'str',
'outsourced': 'str',
'project_creator': 'str',
'members': 'str',
'pushed_at': 'str',
'created_at': 'str',
'updated_at': 'str',
'parent': 'Project',
'paas': 'str',
'stared': 'str',
'watched': 'str',
'permission': 'str',
'relation': 'str',
'assignees_number': 'str',
'testers_number': 'str',
'assignees': 'list[str]',
'testers': 'list[str]'
}
attribute_map = {
'id': 'id',
'full_name': 'full_name',
'human_name': 'human_name',
'url': 'url',
'namespace': 'namespace',
'path': 'path',
'name': 'name',
'owner': 'owner',
'description': 'description',
'private': 'private',
'public': 'public',
'internal': 'internal',
'fork': 'fork',
'html_url': 'html_url',
'ssh_url': 'ssh_url',
'forks_url': 'forks_url',
'keys_url': 'keys_url',
'collaborators_url': 'collaborators_url',
'hooks_url': 'hooks_url',
'branches_url': 'branches_url',
'tags_url': 'tags_url',
'blobs_url': 'blobs_url',
'stargazers_url': 'stargazers_url',
'contributors_url': 'contributors_url',
'commits_url': 'commits_url',
'comments_url': 'comments_url',
'issue_comment_url': 'issue_comment_url',
'issues_url': 'issues_url',
'pulls_url': 'pulls_url',
'milestones_url': 'milestones_url',
'notifications_url': 'notifications_url',
'labels_url': 'labels_url',
'releases_url': 'releases_url',
'recommend': 'recommend',
'homepage': 'homepage',
'language': 'language',
'forks_count': 'forks_count',
'stargazers_count': 'stargazers_count',
'watchers_count': 'watchers_count',
'default_branch': 'default_branch',
'open_issues_count': 'open_issues_count',
'has_issues': 'has_issues',
'has_wiki': 'has_wiki',
'issue_comment': 'issue_comment',
'can_comment': 'can_comment',
'pull_requests_enabled': 'pull_requests_enabled',
'has_page': 'has_page',
'license': 'license',
'outsourced': 'outsourced',
'project_creator': 'project_creator',
'members': 'members',
'pushed_at': 'pushed_at',
'created_at': 'created_at',
'updated_at': 'updated_at',
'parent': 'parent',
'paas': 'paas',
'stared': 'stared',
'watched': 'watched',
'permission': 'permission',
'relation': 'relation',
'assignees_number': 'assignees_number',
'testers_number': 'testers_number',
'assignees': 'assignees',
'testers': 'testers'
}
def __init__(self, id=None, full_name=None, human_name=None, url=None, namespace=None, path=None, name=None,
owner=None, description=None, private=None, public=None, internal=None, fork=None, html_url=None,
ssh_url=None, forks_url=None, keys_url=None, collaborators_url=None, hooks_url=None, branches_url=None,
tags_url=None, blobs_url=None, stargazers_url=None, contributors_url=None, commits_url=None,
comments_url=None, issue_comment_url=None, issues_url=None, pulls_url=None, milestones_url=None,
notifications_url=None, labels_url=None, releases_url=None, recommend=None, homepage=None,
language=None, forks_count=None, stargazers_count=None, watchers_count=None, default_branch=None,
open_issues_count=None, has_issues=None, has_wiki=None, issue_comment=None, can_comment=None,
pull_requests_enabled=None, has_page=None, license=None, outsourced=None, project_creator=None,
members=None, pushed_at=None, created_at=None, updated_at=None, parent=None, paas=None, stared=None,
watched=None, permission=None, relation=None, assignees_number=None, testers_number=None,
assignees=None, testers=None): # noqa: E501
"""Project - a model defined in Swagger""" # noqa: E501
self._id = None
self._full_name = None
self._human_name = None
self._url = None
self._namespace = None
self._path = None
self._name = None
self._owner = None
self._description = None
self._private = None
self._public = None
self._internal = None
self._fork = None
self._html_url = None
self._ssh_url = None
self._forks_url = None
self._keys_url = None
self._collaborators_url = None
self._hooks_url = None
self._branches_url = None
self._tags_url = None
self._blobs_url = None
self._stargazers_url = None
self._contributors_url = None
self._commits_url = None
self._comments_url = None
self._issue_comment_url = None
self._issues_url = None
self._pulls_url = None
self._milestones_url = None
self._notifications_url = None
self._labels_url = None
self._releases_url = None
self._recommend = None
self._homepage = None
self._language = None
self._forks_count = None
self._stargazers_count = None
self._watchers_count = None
self._default_branch = None
self._open_issues_count = None
self._has_issues = None
self._has_wiki = None
self._issue_comment = None
self._can_comment = None
self._pull_requests_enabled = None
self._has_page = None
self._license = None
self._outsourced = None
self._project_creator = None
self._members = None
self._pushed_at = None
self._created_at = None
self._updated_at = None
self._parent = None
self._paas = None
self._stared = None
self._watched = None
self._permission = None
self._relation = None
self._assignees_number = None
self._testers_number = None
self._assignees = None
self._testers = None
self.discriminator = None
if id is not None:
self.id = id
if full_name is not None:
self.full_name = full_name
if human_name is not None:
self.human_name = human_name
if url is not None:
self.url = url
if namespace is not None:
self.namespace = namespace
if path is not None:
self.path = path
if name is not None:
self.name = name
if owner is not None:
self.owner = owner
if description is not None:
self.description = description
if private is not None:
self.private = private
if public is not None:
self.public = public
if internal is not None:
self.internal = internal
if fork is not None:
self.fork = fork
if html_url is not None:
self.html_url = html_url
if ssh_url is not None:
self.ssh_url = ssh_url
if forks_url is not None:
self.forks_url = forks_url
if keys_url is not None:
self.keys_url = keys_url
if collaborators_url is not None:
self.collaborators_url = collaborators_url
if hooks_url is not None:
self.hooks_url = hooks_url
if branches_url is not None:
self.branches_url = branches_url
if tags_url is not None:
self.tags_url = tags_url
if blobs_url is not None:
self.blobs_url = blobs_url
if stargazers_url is not None:
self.stargazers_url = stargazers_url
if contributors_url is not None:
self.contributors_url = contributors_url
if commits_url is not None:
self.commits_url = commits_url
if comments_url is not None:
self.comments_url = comments_url
if issue_comment_url is not None:
self.issue_comment_url = issue_comment_url
if issues_url is not None:
self.issues_url = issues_url
if pulls_url is not None:
self.pulls_url = pulls_url
if milestones_url is not None:
self.milestones_url = milestones_url
if notifications_url is not None:
self.notifications_url = notifications_url
if labels_url is not None:
self.labels_url = labels_url
if releases_url is not None:
self.releases_url = releases_url
if recommend is not None:
self.recommend = recommend
if homepage is not None:
self.homepage = homepage
if language is not None:
self.language = language
if forks_count is not None:
self.forks_count = forks_count
if stargazers_count is not None:
self.stargazers_count = stargazers_count
if watchers_count is not None:
self.watchers_count = watchers_count
if default_branch is not None:
self.default_branch = default_branch
if open_issues_count is not None:
self.open_issues_count = open_issues_count
if has_issues is not None:
self.has_issues = has_issues
if has_wiki is not None:
self.has_wiki = has_wiki
if issue_comment is not None:
self.issue_comment = issue_comment
if can_comment is not None:
self.can_comment = can_comment
if pull_requests_enabled is not None:
self.pull_requests_enabled = pull_requests_enabled
if has_page is not None:
self.has_page = has_page
if license is not None:
self.license = license
if outsourced is not None:
self.outsourced = outsourced
if project_creator is not None:
self.project_creator = project_creator
if members is not None:
self.members = members
if pushed_at is not None:
self.pushed_at = pushed_at
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if parent is not None:
self.parent = parent
if paas is not None:
self.paas = paas
if stared is not None:
self.stared = stared
if watched is not None:
self.watched = watched
if permission is not None:
self.permission = permission
if relation is not None:
self.relation = relation
if assignees_number is not None:
self.assignees_number = assignees_number
if testers_number is not None:
self.testers_number = testers_number
if assignees is not None:
self.assignees = assignees
if testers is not None:
self.testers = testers
    # ------------------------------------------------------------------
    # Generated property accessors: each public attribute is a thin
    # property over a ``_``-prefixed backing field; the setters perform
    # no validation.
    # ------------------------------------------------------------------
    @property
    def id(self):
        """Gets the id of this Project. # noqa: E501
        :return: The id of this Project. # noqa: E501
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this Project.
        :param id: The id of this Project. # noqa: E501
        :type: int
        """
        self._id = id
    @property
    def full_name(self):
        """Gets the full_name of this Project. # noqa: E501
        :return: The full_name of this Project. # noqa: E501
        :rtype: str
        """
        return self._full_name
    @full_name.setter
    def full_name(self, full_name):
        """Sets the full_name of this Project.
        :param full_name: The full_name of this Project. # noqa: E501
        :type: str
        """
        self._full_name = full_name
    @property
    def human_name(self):
        """Gets the human_name of this Project. # noqa: E501
        :return: The human_name of this Project. # noqa: E501
        :rtype: str
        """
        return self._human_name
    @human_name.setter
    def human_name(self, human_name):
        """Sets the human_name of this Project.
        :param human_name: The human_name of this Project. # noqa: E501
        :type: str
        """
        self._human_name = human_name
    @property
    def url(self):
        """Gets the url of this Project. # noqa: E501
        :return: The url of this Project. # noqa: E501
        :rtype: str
        """
        return self._url
    @url.setter
    def url(self, url):
        """Sets the url of this Project.
        :param url: The url of this Project. # noqa: E501
        :type: str
        """
        self._url = url
    @property
    def namespace(self):
        """Gets the namespace of this Project. # noqa: E501
        :return: The namespace of this Project. # noqa: E501
        :rtype: object
        """
        return self._namespace
    @namespace.setter
    def namespace(self, namespace):
        """Sets the namespace of this Project.
        :param namespace: The namespace of this Project. # noqa: E501
        :type: object
        """
        self._namespace = namespace
    @property
    def path(self):
        """Gets the path of this Project. # noqa: E501
        :return: The path of this Project. # noqa: E501
        :rtype: str
        """
        return self._path
    @path.setter
    def path(self, path):
        """Sets the path of this Project.
        :param path: The path of this Project. # noqa: E501
        :type: str
        """
        self._path = path
    @property
    def name(self):
        """Gets the name of this Project. # noqa: E501
        :return: The name of this Project. # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this Project.
        :param name: The name of this Project. # noqa: E501
        :type: str
        """
        self._name = name
    @property
    def owner(self):
        """Gets the owner of this Project. # noqa: E501
        :return: The owner of this Project. # noqa: E501
        :rtype: str
        """
        return self._owner
    @owner.setter
    def owner(self, owner):
        """Sets the owner of this Project.
        :param owner: The owner of this Project. # noqa: E501
        :type: str
        """
        self._owner = owner
    @property
    def description(self):
        """Gets the description of this Project. # noqa: E501
        :return: The description of this Project. # noqa: E501
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """Sets the description of this Project.
        :param description: The description of this Project. # noqa: E501
        :type: str
        """
        self._description = description
    @property
    def private(self):
        """Gets the private of this Project. # noqa: E501
        :return: The private of this Project. # noqa: E501
        :rtype: str
        """
        return self._private
    @private.setter
    def private(self, private):
        """Sets the private of this Project.
        :param private: The private of this Project. # noqa: E501
        :type: str
        """
        self._private = private
    @property
    def public(self):
        """Gets the public of this Project. # noqa: E501
        :return: The public of this Project. # noqa: E501
        :rtype: str
        """
        return self._public
    @public.setter
    def public(self, public):
        """Sets the public of this Project.
        :param public: The public of this Project. # noqa: E501
        :type: str
        """
        self._public = public
    @property
    def internal(self):
        """Gets the internal of this Project. # noqa: E501
        :return: The internal of this Project. # noqa: E501
        :rtype: str
        """
        return self._internal
    @internal.setter
    def internal(self, internal):
        """Sets the internal of this Project.
        :param internal: The internal of this Project. # noqa: E501
        :type: str
        """
        self._internal = internal
    @property
    def fork(self):
        """Gets the fork of this Project. # noqa: E501
        :return: The fork of this Project. # noqa: E501
        :rtype: str
        """
        return self._fork
    @fork.setter
    def fork(self, fork):
        """Sets the fork of this Project.
        :param fork: The fork of this Project. # noqa: E501
        :type: str
        """
        self._fork = fork
    @property
    def html_url(self):
        """Gets the html_url of this Project. # noqa: E501
        :return: The html_url of this Project. # noqa: E501
        :rtype: str
        """
        return self._html_url
    @html_url.setter
    def html_url(self, html_url):
        """Sets the html_url of this Project.
        :param html_url: The html_url of this Project. # noqa: E501
        :type: str
        """
        self._html_url = html_url
    @property
    def ssh_url(self):
        """Gets the ssh_url of this Project. # noqa: E501
        :return: The ssh_url of this Project. # noqa: E501
        :rtype: str
        """
        return self._ssh_url
    @ssh_url.setter
    def ssh_url(self, ssh_url):
        """Sets the ssh_url of this Project.
        :param ssh_url: The ssh_url of this Project. # noqa: E501
        :type: str
        """
        self._ssh_url = ssh_url
    @property
    def forks_url(self):
        """Gets the forks_url of this Project. # noqa: E501
        :return: The forks_url of this Project. # noqa: E501
        :rtype: str
        """
        return self._forks_url
    @forks_url.setter
    def forks_url(self, forks_url):
        """Sets the forks_url of this Project.
        :param forks_url: The forks_url of this Project. # noqa: E501
        :type: str
        """
        self._forks_url = forks_url
    @property
    def keys_url(self):
        """Gets the keys_url of this Project. # noqa: E501
        :return: The keys_url of this Project. # noqa: E501
        :rtype: str
        """
        return self._keys_url
    @keys_url.setter
    def keys_url(self, keys_url):
        """Sets the keys_url of this Project.
        :param keys_url: The keys_url of this Project. # noqa: E501
        :type: str
        """
        self._keys_url = keys_url
    @property
    def collaborators_url(self):
        """Gets the collaborators_url of this Project. # noqa: E501
        :return: The collaborators_url of this Project. # noqa: E501
        :rtype: str
        """
        return self._collaborators_url
    @collaborators_url.setter
    def collaborators_url(self, collaborators_url):
        """Sets the collaborators_url of this Project.
        :param collaborators_url: The collaborators_url of this Project. # noqa: E501
        :type: str
        """
        self._collaborators_url = collaborators_url
    @property
    def hooks_url(self):
        """Gets the hooks_url of this Project. # noqa: E501
        :return: The hooks_url of this Project. # noqa: E501
        :rtype: str
        """
        return self._hooks_url
    @hooks_url.setter
    def hooks_url(self, hooks_url):
        """Sets the hooks_url of this Project.
        :param hooks_url: The hooks_url of this Project. # noqa: E501
        :type: str
        """
        self._hooks_url = hooks_url
    @property
    def branches_url(self):
        """Gets the branches_url of this Project. # noqa: E501
        :return: The branches_url of this Project. # noqa: E501
        :rtype: str
        """
        return self._branches_url
@branches_url.setter
def branches_url(self, branches_url):
"""Sets the branches_url of this Project.
:param branches_url: The branches_url of this Project. # noqa: E501
:type: str
"""
self._branches_url = branches_url
@property
def tags_url(self):
"""Gets the tags_url of this Project. # noqa: E501
:return: The tags_url of this Project. # noqa: E501
:rtype: str
"""
return self._tags_url
@tags_url.setter
def tags_url(self, tags_url):
"""Sets the tags_url of this Project.
:param tags_url: The tags_url of this Project. # noqa: E501
:type: str
"""
self._tags_url = tags_url
@property
def blobs_url(self):
"""Gets the blobs_url of this Project. # noqa: E501
:return: The blobs_url of this Project. # noqa: E501
:rtype: str
"""
return self._blobs_url
@blobs_url.setter
def blobs_url(self, blobs_url):
"""Sets the blobs_url of this Project.
:param blobs_url: The blobs_url of this Project. # noqa: E501
:type: str
"""
self._blobs_url = blobs_url
@property
def stargazers_url(self):
"""Gets the stargazers_url of this Project. # noqa: E501
:return: The stargazers_url of this Project. # noqa: E501
:rtype: str
"""
return self._stargazers_url
@stargazers_url.setter
def stargazers_url(self, stargazers_url):
"""Sets the stargazers_url of this Project.
:param stargazers_url: The stargazers_url of this Project. # noqa: E501
:type: str
"""
self._stargazers_url = stargazers_url
@property
def contributors_url(self):
"""Gets the contributors_url of this Project. # noqa: E501
:return: The contributors_url of this Project. # noqa: E501
:rtype: str
"""
return self._contributors_url
@contributors_url.setter
def contributors_url(self, contributors_url):
"""Sets the contributors_url of this Project.
:param contributors_url: The contributors_url of this Project. # noqa: E501
:type: str
"""
self._contributors_url = contributors_url
@property
def commits_url(self):
"""Gets the commits_url of this Project. # noqa: E501
:return: The commits_url of this Project. # noqa: E501
:rtype: str
"""
return self._commits_url
@commits_url.setter
def commits_url(self, commits_url):
"""Sets the commits_url of this Project.
:param commits_url: The commits_url of this Project. # noqa: E501
:type: str
"""
self._commits_url = commits_url
@property
def comments_url(self):
"""Gets the comments_url of this Project. # noqa: E501
:return: The comments_url of this Project. # noqa: E501
:rtype: str
"""
return self._comments_url
@comments_url.setter
def comments_url(self, comments_url):
"""Sets the comments_url of this Project.
:param comments_url: The comments_url of this Project. # noqa: E501
:type: str
"""
self._comments_url = comments_url
@property
def issue_comment_url(self):
"""Gets the issue_comment_url of this Project. # noqa: E501
:return: The issue_comment_url of this Project. # noqa: E501
:rtype: str
"""
return self._issue_comment_url
@issue_comment_url.setter
def issue_comment_url(self, issue_comment_url):
"""Sets the issue_comment_url of this Project.
:param issue_comment_url: The issue_comment_url of this Project. # noqa: E501
:type: str
"""
self._issue_comment_url = issue_comment_url
@property
def issues_url(self):
"""Gets the issues_url of this Project. # noqa: E501
:return: The issues_url of this Project. # noqa: E501
:rtype: str
"""
return self._issues_url
@issues_url.setter
def issues_url(self, issues_url):
"""Sets the issues_url of this Project.
:param issues_url: The issues_url of this Project. # noqa: E501
:type: str
"""
self._issues_url = issues_url
@property
def pulls_url(self):
"""Gets the pulls_url of this Project. # noqa: E501
:return: The pulls_url of this Project. # noqa: E501
:rtype: str
"""
return self._pulls_url
@pulls_url.setter
def pulls_url(self, pulls_url):
"""Sets the pulls_url of this Project.
:param pulls_url: The pulls_url of this Project. # noqa: E501
:type: str
"""
self._pulls_url = pulls_url
@property
def milestones_url(self):
"""Gets the milestones_url of this Project. # noqa: E501
:return: The milestones_url of this Project. # noqa: E501
:rtype: str
"""
return self._milestones_url
@milestones_url.setter
def milestones_url(self, milestones_url):
"""Sets the milestones_url of this Project.
:param milestones_url: The milestones_url of this Project. # noqa: E501
:type: str
"""
self._milestones_url = milestones_url
@property
def notifications_url(self):
"""Gets the notifications_url of this Project. # noqa: E501
:return: The notifications_url of this Project. # noqa: E501
:rtype: str
"""
return self._notifications_url
@notifications_url.setter
def notifications_url(self, notifications_url):
"""Sets the notifications_url of this Project.
:param notifications_url: The notifications_url of this Project. # noqa: E501
:type: str
"""
self._notifications_url = notifications_url
@property
def labels_url(self):
"""Gets the labels_url of this Project. # noqa: E501
:return: The labels_url of this Project. # noqa: E501
:rtype: str
"""
return self._labels_url
@labels_url.setter
def labels_url(self, labels_url):
"""Sets the labels_url of this Project.
:param labels_url: The labels_url of this Project. # noqa: E501
:type: str
"""
self._labels_url = labels_url
@property
def releases_url(self):
"""Gets the releases_url of this Project. # noqa: E501
:return: The releases_url of this Project. # noqa: E501
:rtype: str
"""
return self._releases_url
@releases_url.setter
def releases_url(self, releases_url):
"""Sets the releases_url of this Project.
:param releases_url: The releases_url of this Project. # noqa: E501
:type: str
"""
self._releases_url = releases_url
@property
def recommend(self):
"""Gets the recommend of this Project. # noqa: E501
:return: The recommend of this Project. # noqa: E501
:rtype: str
"""
return self._recommend
@recommend.setter
def recommend(self, recommend):
"""Sets the recommend of this Project.
:param recommend: The recommend of this Project. # noqa: E501
:type: str
"""
self._recommend = recommend
@property
def homepage(self):
"""Gets the homepage of this Project. # noqa: E501
:return: The homepage of this Project. # noqa: E501
:rtype: str
"""
return self._homepage
@homepage.setter
def homepage(self, homepage):
"""Sets the homepage of this Project.
:param homepage: The homepage of this Project. # noqa: E501
:type: str
"""
self._homepage = homepage
@property
def language(self):
"""Gets the language of this Project. # noqa: E501
:return: The language of this Project. # noqa: E501
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this Project.
:param language: The language of this Project. # noqa: E501
:type: str
"""
self._language = language
@property
def forks_count(self):
"""Gets the forks_count of this Project. # noqa: E501
:return: The forks_count of this Project. # noqa: E501
:rtype: str
"""
return self._forks_count
@forks_count.setter
def forks_count(self, forks_count):
"""Sets the forks_count of this Project.
:param forks_count: The forks_count of this Project. # noqa: E501
:type: str
"""
self._forks_count = forks_count
@property
def stargazers_count(self):
"""Gets the stargazers_count of this Project. # noqa: E501
:return: The stargazers_count of this Project. # noqa: E501
:rtype: str
"""
return self._stargazers_count
@stargazers_count.setter
def stargazers_count(self, stargazers_count):
"""Sets the stargazers_count of this Project.
:param stargazers_count: The stargazers_count of this Project. # noqa: E501
:type: str
"""
self._stargazers_count = stargazers_count
@property
def watchers_count(self):
"""Gets the watchers_count of this Project. # noqa: E501
:return: The watchers_count of this Project. # noqa: E501
:rtype: str
"""
return self._watchers_count
@watchers_count.setter
def watchers_count(self, watchers_count):
"""Sets the watchers_count of this Project.
:param watchers_count: The watchers_count of this Project. # noqa: E501
:type: str
"""
self._watchers_count = watchers_count
@property
def default_branch(self):
"""Gets the default_branch of this Project. # noqa: E501
:return: The default_branch of this Project. # noqa: E501
:rtype: str
"""
return self._default_branch
@default_branch.setter
def default_branch(self, default_branch):
"""Sets the default_branch of this Project.
:param default_branch: The default_branch of this Project. # noqa: E501
:type: str
"""
self._default_branch = default_branch
    @property
    def open_issues_count(self):
        """Count of open issues on this Project (generated accessor).

        :return: The open_issues_count of this Project.
        :rtype: int
        """
        return self._open_issues_count
    @open_issues_count.setter
    def open_issues_count(self, open_issues_count):
        """Store the open-issue count on ``_open_issues_count``.

        :param open_issues_count: The open_issues_count of this Project.
        :type: int
        """
        self._open_issues_count = open_issues_count
@property
def has_issues(self):
"""Gets the has_issues of this Project. # noqa: E501
:return: The has_issues of this Project. # noqa: E501
:rtype: str
"""
return self._has_issues
@has_issues.setter
def has_issues(self, has_issues):
"""Sets the has_issues of this Project.
:param has_issues: The has_issues of this Project. # noqa: E501
:type: str
"""
self._has_issues = has_issues
@property
def has_wiki(self):
"""Gets the has_wiki of this Project. # noqa: E501
:return: The has_wiki of this Project. # noqa: E501
:rtype: str
"""
return self._has_wiki
@has_wiki.setter
def has_wiki(self, has_wiki):
"""Sets the has_wiki of this Project.
:param has_wiki: The has_wiki of this Project. # noqa: E501
:type: str
"""
self._has_wiki = has_wiki
@property
def issue_comment(self):
"""Gets the issue_comment of this Project. # noqa: E501
:return: The issue_comment of this Project. # noqa: E501
:rtype: str
"""
return self._issue_comment
@issue_comment.setter
def issue_comment(self, issue_comment):
"""Sets the issue_comment of this Project.
:param issue_comment: The issue_comment of this Project. # noqa: E501
:type: str
"""
self._issue_comment = issue_comment
@property
def can_comment(self):
"""Gets the can_comment of this Project. # noqa: E501
:return: The can_comment of this Project. # noqa: E501
:rtype: str
"""
return self._can_comment
@can_comment.setter
def can_comment(self, can_comment):
"""Sets the can_comment of this Project.
:param can_comment: The can_comment of this Project. # noqa: E501
:type: str
"""
self._can_comment = can_comment
@property
def pull_requests_enabled(self):
"""Gets the pull_requests_enabled of this Project. # noqa: E501
:return: The pull_requests_enabled of this Project. # noqa: E501
:rtype: str
"""
return self._pull_requests_enabled
@pull_requests_enabled.setter
def pull_requests_enabled(self, pull_requests_enabled):
"""Sets the pull_requests_enabled of this Project.
:param pull_requests_enabled: The pull_requests_enabled of this Project. # noqa: E501
:type: str
"""
self._pull_requests_enabled = pull_requests_enabled
@property
def has_page(self):
"""Gets the has_page of this Project. # noqa: E501
:return: The has_page of this Project. # noqa: E501
:rtype: str
"""
return self._has_page
@has_page.setter
def has_page(self, has_page):
"""Sets the has_page of this Project.
:param has_page: The has_page of this Project. # noqa: E501
:type: str
"""
self._has_page = has_page
@property
def license(self):
"""Gets the license of this Project. # noqa: E501
:return: The license of this Project. # noqa: E501
:rtype: str
"""
return self._license
@license.setter
def license(self, license):
"""Sets the license of this Project.
:param license: The license of this Project. # noqa: E501
:type: str
"""
self._license = license
@property
def outsourced(self):
"""Gets the outsourced of this Project. # noqa: E501
:return: The outsourced of this Project. # noqa: E501
:rtype: str
"""
return self._outsourced
@outsourced.setter
def outsourced(self, outsourced):
"""Sets the outsourced of this Project.
:param outsourced: The outsourced of this Project. # noqa: E501
:type: str
"""
self._outsourced = outsourced
@property
def project_creator(self):
"""Gets the project_creator of this Project. # noqa: E501
:return: The project_creator of this Project. # noqa: E501
:rtype: str
"""
return self._project_creator
@project_creator.setter
def project_creator(self, project_creator):
"""Sets the project_creator of this Project.
:param project_creator: The project_creator of this Project. # noqa: E501
:type: str
"""
self._project_creator = project_creator
@property
def members(self):
"""Gets the members of this Project. # noqa: E501
:return: The members of this Project. # noqa: E501
:rtype: str
"""
return self._members
@members.setter
def members(self, members):
"""Sets the members of this Project.
:param members: The members of this Project. # noqa: E501
:type: str
"""
self._members = members
@property
def pushed_at(self):
"""Gets the pushed_at of this Project. # noqa: E501
:return: The pushed_at of this Project. # noqa: E501
:rtype: str
"""
return self._pushed_at
@pushed_at.setter
def pushed_at(self, pushed_at):
"""Sets the pushed_at of this Project.
:param pushed_at: The pushed_at of this Project. # noqa: E501
:type: str
"""
self._pushed_at = pushed_at
@property
def created_at(self):
"""Gets the created_at of this Project. # noqa: E501
:return: The created_at of this Project. # noqa: E501
:rtype: str
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Project.
:param created_at: The created_at of this Project. # noqa: E501
:type: str
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this Project. # noqa: E501
:return: The updated_at of this Project. # noqa: E501
:rtype: str
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this Project.
:param updated_at: The updated_at of this Project. # noqa: E501
:type: str
"""
self._updated_at = updated_at
@property
def parent(self):
"""Gets the parent of this Project. # noqa: E501
:return: The parent of this Project. # noqa: E501
:rtype: Project
"""
return self._parent
@parent.setter
def parent(self, parent):
"""Sets the parent of this Project.
:param parent: The parent of this Project. # noqa: E501
:type: Project
"""
self._parent = parent
@property
def paas(self):
"""Gets the paas of this Project. # noqa: E501
:return: The paas of this Project. # noqa: E501
:rtype: str
"""
return self._paas
@paas.setter
def paas(self, paas):
"""Sets the paas of this Project.
:param paas: The paas of this Project. # noqa: E501
:type: str
"""
self._paas = paas
@property
def stared(self):
"""Gets the stared of this Project. # noqa: E501
:return: The stared of this Project. # noqa: E501
:rtype: str
"""
return self._stared
@stared.setter
def stared(self, stared):
"""Sets the stared of this Project.
:param stared: The stared of this Project. # noqa: E501
:type: str
"""
self._stared = stared
@property
def watched(self):
"""Gets the watched of this Project. # noqa: E501
:return: The watched of this Project. # noqa: E501
:rtype: str
"""
return self._watched
@watched.setter
def watched(self, watched):
"""Sets the watched of this Project.
:param watched: The watched of this Project. # noqa: E501
:type: str
"""
self._watched = watched
@property
def permission(self):
"""Gets the permission of this Project. # noqa: E501
:return: The permission of this Project. # noqa: E501
:rtype: str
"""
return self._permission
@permission.setter
def permission(self, permission):
"""Sets the permission of this Project.
:param permission: The permission of this Project. # noqa: E501
:type: str
"""
self._permission = permission
@property
def relation(self):
"""Gets the relation of this Project. # noqa: E501
:return: The relation of this Project. # noqa: E501
:rtype: str
"""
return self._relation
@relation.setter
def relation(self, relation):
"""Sets the relation of this Project.
:param relation: The relation of this Project. # noqa: E501
:type: str
"""
self._relation = relation
@property
def assignees_number(self):
"""Gets the assignees_number of this Project. # noqa: E501
:return: The assignees_number of this Project. # noqa: E501
:rtype: str
"""
return self._assignees_number
@assignees_number.setter
def assignees_number(self, assignees_number):
"""Sets the assignees_number of this Project.
:param assignees_number: The assignees_number of this Project. # noqa: E501
:type: str
"""
self._assignees_number = assignees_number
@property
def testers_number(self):
"""Gets the testers_number of this Project. # noqa: E501
:return: The testers_number of this Project. # noqa: E501
:rtype: str
"""
return self._testers_number
@testers_number.setter
def testers_number(self, testers_number):
"""Sets the testers_number of this Project.
:param testers_number: The testers_number of this Project. # noqa: E501
:type: str
"""
self._testers_number = testers_number
@property
def assignees(self):
"""Gets the assignees of this Project. # noqa: E501
:return: The assignees of this Project. # noqa: E501
:rtype: list[str]
"""
return self._assignees
@assignees.setter
def assignees(self, assignees):
"""Sets the assignees of this Project.
:param assignees: The assignees of this Project. # noqa: E501
:type: list[str]
"""
self._assignees = assignees
@property
def testers(self):
"""Gets the testers of this Project. # noqa: E501
:return: The testers of this Project. # noqa: E501
:rtype: list[str]
"""
return self._testers
@testers.setter
def testers(self, testers):
"""Sets the testers of this Project.
:param testers: The testers of this Project. # noqa: E501
:type: list[str]
"""
self._testers = testers
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Project, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Project):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.038813 | 120 | 0.580601 |
acde9ed4cd897f770834b9e6448d9e8521d3fd10 | 1,779 | py | Python | femagtools/jhb.py | dapu/femagtools | 95eaf750adc2013232cdf482e523b3900ac6eb08 | [
"BSD-2-Clause"
] | 21 | 2016-09-07T12:17:21.000Z | 2022-01-08T11:43:24.000Z | femagtools/jhb.py | dapu/femagtools | 95eaf750adc2013232cdf482e523b3900ac6eb08 | [
"BSD-2-Clause"
] | 63 | 2016-09-11T12:04:48.000Z | 2022-03-28T13:22:16.000Z | femagtools/jhb.py | dapu/femagtools | 95eaf750adc2013232cdf482e523b3900ac6eb08 | [
"BSD-2-Clause"
] | 8 | 2017-07-12T13:05:57.000Z | 2022-01-08T11:43:26.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import os
import femagtools.mcv as mcv
import logging
logger = logging.getLogger(__name__)
class Reader(object):
    """Parser for JHB curve files.

    Layout, as consumed below: line 1 carries a file name whose basename
    (without extension) becomes :attr:`name`; line 2 carries a column count
    followed by angle values; every further line with at least two columns
    is ``B H1 [H2 ...]`` — one H column per curve.  Decimal commas are
    accepted in numbers.
    """

    def __init__(self, filename):
        self.mc1_type = mcv.MAGCRV
        with open(filename) as jhbfile:
            first_line = jhbfile.readline().strip()
            self.name = os.path.splitext(os.path.basename(first_line))[0]
            header = jhbfile.readline().split()
            self.angle = [float(col) for col in header[1:]]
            ncurves = int(header[0]) - 1
            self.curve = [{'hi': [], 'bi': []} for _ in range(ncurves)]
            for line in jhbfile:
                cols = line.split()
                if len(cols) < 2:
                    continue
                values = [float(c.replace(',', '.')) for c in cols]
                if not values[1] > 0:
                    # rows whose first H value is not positive are skipped
                    continue
                induction = values[0]
                for idx, field in enumerate(values[1:]):
                    self.curve[idx]['hi'].append(field)
                    self.curve[idx]['bi'].append(induction)
            if len(self.curve) > 1:
                self.mc1_type = mcv.ORIENT_CRV
        logger.info("JHB %s curves %d sizes %s", self.name, len(self.curve),
                    [len(c['bi']) for c in self.curve])

    def __getitem__(self, index):
        """Allow dict-style access to attributes, e.g. ``reader['name']``."""
        return self.__getattribute__(index)

    def getValues(self):
        """Return the parsed content as an mcv-style dict."""
        return {'name': self.name,
                'ctype': self.mc1_type,
                'curve': self.curve}
def read(filename):
    """Read a JHB file and return its content as an mcv dict."""
    return Reader(filename).getValues()
if __name__ == "__main__":
    # Take the file name from argv when given, otherwise from stdin.
    if len(sys.argv) == 2:
        jhb_path = sys.argv[1]
    else:
        jhb_path = sys.stdin.readline().strip()
    print(Reader(jhb_path).getValues())
| 28.693548 | 76 | 0.483418 |
acdea05e3448dbc34389b5f980901f3ef5c87613 | 1,135 | py | Python | seleniumbase/virtual_display/xephyr.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | 1 | 2021-05-12T14:27:31.000Z | 2021-05-12T14:27:31.000Z | seleniumbase/virtual_display/xephyr.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | null | null | null | seleniumbase/virtual_display/xephyr.py | tomejorge/SeleniumBase | e3e50bbd80594c52131b0d88ca3e2c2f7692e340 | [
"MIT"
] | null | null | null | from seleniumbase.virtual_display.easyprocess import EasyProcess
from seleniumbase.virtual_display.abstractdisplay import AbstractDisplay
# Executable name of the nested X server driven by XephyrDisplay below.
PROGRAM = "Xephyr"
class XephyrDisplay(AbstractDisplay):
    """Wrapper around the ``Xephyr`` nested X server, which renders its
    output into a window on an already-running X display."""

    def __init__(self, size=(1024, 768), color_depth=24, bgcolor="black"):
        """
        :param size: (width, height) of the nested screen
        :param color_depth: color depth in bits
        :param bgcolor: 'black' or 'white'
        """
        # Assignment order kept: the base initializer may read these fields.
        self.color_depth = color_depth
        self.size = size
        self.bgcolor = bgcolor
        self.screen = 0
        self.process = None
        self.display = None
        AbstractDisplay.__init__(self)

    @classmethod
    def check_installed(cls):
        """Probe for the Xephyr binary by invoking it with ``-help``."""
        probe = EasyProcess([PROGRAM, "-help"])
        probe.enable_stdout_log = False
        probe.enable_stderr_log = False
        probe.call()

    @property
    def _cmd(self):
        """Command line used to launch Xephyr for this display."""
        # KeyError on any bgcolor other than 'black'/'white', by design.
        bg_flag = {"black": "-br", "white": "-wr"}[self.bgcolor]
        geometry = "x".join(str(part)
                            for part in list(self.size) + [self.color_depth])
        return [PROGRAM, bg_flag, "-screen", geometry, self.new_display_var]
| 26.395349 | 76 | 0.597357 |
acdea07cba893a13675301c641c71be5d0775170 | 160 | py | Python | apps/portfolios/admin.py | aldwyn/effigia | eb456656949bf68934530bbec9c15ebc6d0236b8 | [
"MIT"
] | 1 | 2018-11-15T05:17:30.000Z | 2018-11-15T05:17:30.000Z | apps/portfolios/admin.py | aldwyn/effigia | eb456656949bf68934530bbec9c15ebc6d0236b8 | [
"MIT"
] | 5 | 2021-06-09T17:20:01.000Z | 2022-03-11T23:18:06.000Z | apps/portfolios/admin.py | aldwyn/effigia | eb456656949bf68934530bbec9c15ebc6d0236b8 | [
"MIT"
] | 1 | 2018-10-05T19:03:27.000Z | 2018-10-05T19:03:27.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Portfolio
# Expose the Portfolio model in the Django admin with the default ModelAdmin.
admin.site.register(Portfolio)
| 20 | 39 | 0.78125 |
acdea0ba785e38119eaccfd1793684bc38f3cb47 | 1,662 | py | Python | dataclean/Pre-processing .py | keke-ing/DalianHouseDataAnalysis | 0c61a8233ae250a84635d6427e8a22ac4af8fcf0 | [
"MIT"
] | null | null | null | dataclean/Pre-processing .py | keke-ing/DalianHouseDataAnalysis | 0c61a8233ae250a84635d6427e8a22ac4af8fcf0 | [
"MIT"
] | 3 | 2020-03-03T12:30:30.000Z | 2020-04-08T16:10:36.000Z | dataclean/Pre-processing .py | keke-ing/DalianHouseDataAnalysis | 0c61a8233ae250a84635d6427e8a22ac4af8fcf0 | [
"MIT"
] | null | null | null | import pandas as pd
if __name__ == '__main__':
    # Load the cleaned Lianjia (Dalian housing) dataset.
    df = pd.read_csv(r'../data/lianjia_cleaned.csv', sep=',')
    # Print basic information about the raw data.
    print("original data rowCount: %d" % (df.shape[0]))
    print("original data colCount: %d" % (df.shape[1]))
    print(df.dtypes)
    # Drop columns that are not useful for the analysis.
    df.drop("year_of_property", axis=1, inplace=True) # property ownership duration
    # df.drop(["deal_time"], axis=1, inplace=True)
    # Encode categorical features as numbers:
    #   with_elevator: 无 (none) -> 0, 有 (present) -> 1
    #   is_two_five:   暂无数据 (no data) -> 0, 满两年 (>=2 years) -> 2,
    #                  满五年 (>=5 years) -> 5
    df.replace({"with_elevator": "无"}, 0, inplace=True)
    df.replace({"with_elevator": "有"}, 1, inplace=True)
    df.replace({"is_two_five": "暂无数据"}, 0, inplace=True)
    df.replace({"is_two_five": "满两年"}, 2, inplace=True)
    df.replace({"is_two_five": "满五年"}, 5, inplace=True)
    # NOTE(review): values stay strings after stripping the ㎡ suffix —
    # confirm a numeric cast happens downstream before any arithmetic.
    df.loc[:, "gross_area"] = df["gross_area"].str.strip("㎡")
    # Household layout split (rooms/halls/kitchens/baths) — kept disabled.
    # df[['bedroom', 'living_room', 'kitchen', 'toilet']] = df["household_style"].str.extract('(\d+)室(\d+)厅(\d+)厨(\d+)卫',
    #                                                                                         expand=False)
    # Split "longitude,latitude" strings into two separate columns.
    lon = []
    lat = []
    la_lo = df['longitude_latitude']
    for i in la_lo:
        lon.append(i.split(',')[-2] )
        lat.append(i.split(',')[-1] )
    df['lon'] = lon
    df['lat'] = lat
    # df.drop("longitude_latitude", axis=1, inplace=True)
    # Total floors: pull N out of strings like "共N层" ("N floors in total").
    df["floor_number"] = df["floor_number"].str.extract('共(\d+)层', expand=False)
    # Turn the construction year into a building age (relative to 2020).
    df["build_year"] = df["build_year"].map(lambda x: 2020 - int(x))
    df.rename(columns={'build_year': 'house_age'}, inplace=True)
    print("processed data rowCount: %d" % (df.shape[0]))
    print("processed data colCount: %d" % (df.shape[1]))
    # Write the processed dataset.
    df.to_csv("../data/lianjia_processed.csv", index=False)
| 34.625 | 121 | 0.561974 |
acdea1258ff89db38e8c6744c6973e0550339952 | 670 | py | Python | manage.py | renolet/todo-challenge | 41e18950ed6e86a702d3b16b44b10fcbeaa65835 | [
"MIT"
] | null | null | null | manage.py | renolet/todo-challenge | 41e18950ed6e86a702d3b16b44b10fcbeaa65835 | [
"MIT"
] | null | null | null | manage.py | renolet/todo-challenge | 41e18950ed6e86a702d3b16b44b10fcbeaa65835 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks for the task_managment project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'task_managment.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
| 29.130435 | 78 | 0.68209 |
acdea126e3865988528f6d851d61c906302a5d20 | 9,138 | py | Python | sdk/python/pulumi_azure_native/insights/v20201020/get_my_workbook.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/insights/v20201020/get_my_workbook.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/insights/v20201020/get_my_workbook.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetMyWorkbookResult',
'AwaitableGetMyWorkbookResult',
'get_my_workbook',
]
@pulumi.output_type
class GetMyWorkbookResult:
    """
    An Application Insights private workbook definition.
    """
    def __init__(__self__, category=None, display_name=None, etag=None, id=None, identity=None, kind=None, location=None, name=None, serialized_data=None, source_id=None, storage_uri=None, tags=None, time_modified=None, type=None, user_id=None, version=None):
        # Every argument follows the same generated pattern: validate the
        # payload type (str, except the three mapping-valued fields), then
        # store it via pulumi.set so @pulumi.output_type exposes it through
        # the property getters below.  Order of pulumi.set calls matches the
        # original generated code.
        dict_valued = ("etag", "identity", "tags")
        arguments = (
            ("category", category),
            ("display_name", display_name),
            ("etag", etag),
            ("id", id),
            ("identity", identity),
            ("kind", kind),
            ("location", location),
            ("name", name),
            ("serialized_data", serialized_data),
            ("source_id", source_id),
            ("storage_uri", storage_uri),
            ("tags", tags),
            ("time_modified", time_modified),
            ("type", type),
            ("user_id", user_id),
            ("version", version),
        )
        for key, value in arguments:
            expected = dict if key in dict_valued else str
            if value and not isinstance(value, expected):
                raise TypeError("Expected argument '%s' to be a %s"
                                % (key, expected.__name__))
            pulumi.set(__self__, key, value)
@property
@pulumi.getter
def category(self) -> str:
"""
Workbook category, as defined by the user at creation time.
"""
return pulumi.get(self, "category")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
The user-defined name of the private workbook.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def etag(self) -> Optional[Mapping[str, str]]:
"""
Resource etag
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.MyWorkbookManagedIdentityResponse']:
"""
Identity used for BYOS
"""
return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        The kind of workbook. Choices are user and shared.
        Optional; ``None`` when the provider did not return a kind.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location (Azure region).
        Optional; ``None`` when the provider did not return a location.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Azure resource name.
        Optional; distinct from ``display_name``, which is the user-facing title.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="serializedData")
    def serialized_data(self) -> str:
        """
        Configuration of this particular private workbook. Configuration data is a string containing valid JSON.
        Maps to the provider-side camelCase ``serializedData`` output property.
        """
        return pulumi.get(self, "serialized_data")
    @property
    @pulumi.getter(name="sourceId")
    def source_id(self) -> Optional[str]:
        """
        Optional resourceId for a source resource.
        Maps to the provider-side camelCase ``sourceId`` output property.
        """
        return pulumi.get(self, "source_id")
    @property
    @pulumi.getter(name="storageUri")
    def storage_uri(self) -> Optional[str]:
        """
        BYOS Storage Account URI.
        Maps to the provider-side camelCase ``storageUri`` output property.
        """
        return pulumi.get(self, "storage_uri")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        Optional; ``None`` when absent, otherwise a string-to-string mapping.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="timeModified")
    def time_modified(self) -> str:
        """
        Date and time in UTC of the last modification that was made to this private workbook definition.
        Maps to the provider-side camelCase ``timeModified`` output property.
        """
        return pulumi.get(self, "time_modified")
    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Azure resource type.
        Optional; ``None`` when the provider did not return a type.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="userId")
    def user_id(self) -> str:
        """
        Unique user id of the specific user that owns this private workbook.
        Maps to the provider-side camelCase ``userId`` output property.
        """
        return pulumi.get(self, "user_id")
    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        This instance's version of the data model. This can change as new features are added that can be marked private workbook.
        Optional; ``None`` when the provider did not return a version.
        """
        return pulumi.get(self, "version")
class AwaitableGetMyWorkbookResult(GetMyWorkbookResult):
    """Awaitable variant of :class:`GetMyWorkbookResult`.

    Awaiting an instance never suspends; it immediately produces a plain
    ``GetMyWorkbookResult`` carrying the same output values.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` is what makes this method a generator,
        # which is the protocol requirement for objects used with `await`.
        if False:
            yield self
        output_names = (
            'category', 'display_name', 'etag', 'id', 'identity', 'kind',
            'location', 'name', 'serialized_data', 'source_id', 'storage_uri',
            'tags', 'time_modified', 'type', 'user_id', 'version')
        return GetMyWorkbookResult(
            **{attr: getattr(self, attr) for attr in output_names})
def get_my_workbook(resource_group_name: Optional[str] = None,
                    resource_name: Optional[str] = None,
                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMyWorkbookResult:
    """
    An Application Insights private workbook definition.

    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str resource_name: The name of the Application Insights component resource.
    """
    # Map the Python-level arguments onto the provider's camelCase invoke arguments.
    __args__ = {
        'resourceGroupName': resource_group_name,
        'resourceName': resource_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unwrap its typed result.
    raw_result = pulumi.runtime.invoke('azure-native:insights/v20201020:getMyWorkbook', __args__, opts=opts, typ=GetMyWorkbookResult).value
    # Re-expose every output property on an awaitable result wrapper.
    output_names = (
        'category', 'display_name', 'etag', 'id', 'identity', 'kind',
        'location', 'name', 'serialized_data', 'source_id', 'storage_uri',
        'tags', 'time_modified', 'type', 'user_id', 'version')
    return AwaitableGetMyWorkbookResult(
        **{attr: getattr(raw_result, attr) for attr in output_names})
| 34.877863 | 259 | 0.629022 |
acdea2b0044e9004f845387de9cc43352892081e | 2,229 | py | Python | tests/test_tree.py | postelrich/tornado_restful | d1a1f392c319922982ef4e82ff85f22feae084ea | [
"BSD-3-Clause"
] | null | null | null | tests/test_tree.py | postelrich/tornado_restful | d1a1f392c319922982ef4e82ff85f22feae084ea | [
"BSD-3-Clause"
] | null | null | null | tests/test_tree.py | postelrich/tornado_restful | d1a1f392c319922982ef4e82ff85f22feae084ea | [
"BSD-3-Clause"
] | null | null | null | import pytest
from tornado_restful.path import RestFunction
from tornado_restful.tree import Step, PathDispatcher
def test_step_init():
    """A Step keeps the path and part it was constructed with."""
    step = Step('path', 'part')
    assert step.path == 'path'
    assert step.part == 'part'
def test_pathdispatcher_init():
    """A fresh dispatcher has a root Step at '/' and no regex paths."""
    dispatcher = PathDispatcher()
    root = dispatcher.registry
    assert isinstance(root, Step)
    assert root.path == '/'
    assert root.part == '/'
    assert dispatcher.regex_paths == []
def test_pathdispatcher_find():
    """_find walks the step tree, honouring the wildcard flag."""
    # Degenerate case: nothing left to match returns the node unchanged.
    assert PathDispatcher._find([], 'x') == ([], 'x')

    leaf = Step('/widgets', 'widgets')
    root = Step('/', 'GET', children={'widgets': leaf})
    assert PathDispatcher._find(['widgets'], root) == ([], leaf)
    with pytest.raises(ValueError):
        PathDispatcher._find(['wodgets'], root)

    # Wildcard children are only matched when wildcard=True.
    wild = Step('/widgets/*', '*')
    middle = Step('/widgets', 'widgets', children={'*': wild})
    root = Step('/', 'GET', children={'widgets': middle})
    with pytest.raises(ValueError):
        PathDispatcher._find(['widgets', '1'], root)
    assert PathDispatcher._find(['widgets', '1'], root, wildcard=True) == ([], wild)
def test_pathdispatcher_insert():
    """_insert registers a path once and rejects a duplicate registration."""
    root = Step('/', '/')
    rest_fn = RestFunction('x', '/widgets/<int:foo>/gidgets', 'x')
    PathDispatcher._insert('/widgets/*/gidgets', ['GET', 'widgets', '*', 'gidgets'],
                           rest_fn, root)
    assert 'GET' in root
    assert 'widgets' in root.children['GET']
    # Registering the same path a second time must raise.
    with pytest.raises(ValueError):
        PathDispatcher._insert('/widgets/*/gidgets', ['GET', 'widgets', '*', 'gidgets'],
                               RestFunction('x', '/widgets/<int:foo>/gidgets', 'x'), root)
def test_pathdispatcher_path_to_parts():
    """path_to_parts prepends the HTTP method and splits on '/'."""
    cases = [
        ('', ['GET', '']),
        ('/', ['GET', '']),
        ('/foo', ['GET', 'foo']),
        ('/foo/bar', ['GET', 'foo', 'bar']),
        ('/foo/*/bar', ['GET', 'foo', '*', 'bar']),
    ]
    for path, expected in cases:
        assert PathDispatcher.path_to_parts(path, 'GET') == expected
def test_pathdispatcher_register_rest_function():
    """register_rest_function threads a RestFunction into the registry tree."""
    dispatcher = PathDispatcher()
    dispatcher.register_rest_function(
        RestFunction('x', '/widgets/<int:foo>/gidgets', 'x'))
    assert 'x' in dispatcher.registry
    assert 'widgets' in dispatcher.registry.children['x']
acdea41efbd8b310a68afc258948c32189e9967b | 2,050 | py | Python | Voice_Recognition/voicePassword.py | Dnisde/EC544-_-Final-Project | f9d24ac5c4aedf2e10698b79dd7d87ebba7ce570 | [
"MIT"
] | null | null | null | Voice_Recognition/voicePassword.py | Dnisde/EC544-_-Final-Project | f9d24ac5c4aedf2e10698b79dd7d87ebba7ce570 | [
"MIT"
] | null | null | null | Voice_Recognition/voicePassword.py | Dnisde/EC544-_-Final-Project | f9d24ac5c4aedf2e10698b79dd7d87ebba7ce570 | [
"MIT"
] | null | null | null | import json
import subprocess
import glob
import os
import time
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
# def voiceRecognition_start():
# global p1
# rpistr_voice = "./voice2text.sh"
# p1 = subprocess.Popen(rpistr_voice, shell=True, preexec_fn=os.setsid)
def publish_host():
    """Publish the 'host recognized' message to the device shadow topic.

    Relies on the module-level ``myMQTTClient`` created in the __main__ block.
    """
    shadow_topic = "$aws/things/k64f/shadow/update/accepted"
    myMQTTClient.publish(topic=shadow_topic, QoS=1,
                         payload="Host, open the door")
    print("Published message to the topic!")
def publish_unknown():
    """Publish the 'unknown person' message to the device shadow topic.

    Relies on the module-level ``myMQTTClient`` created in the __main__ block.
    """
    shadow_topic = "$aws/things/k64f/shadow/update/accepted"
    myMQTTClient.publish(topic=shadow_topic, QoS=1,
                         payload="Unknown person!")
    print("Published message to the topic!")
if __name__ == '__main__':
    # Set up the MQTT connection to AWS IoT Core.
    # Client id is arbitrary; if another connection using the same id is
    # opened, AWS IoT automatically closes the previous one.
    myMQTTClient = AWSIoTMQTTClient("Ccw_Mac_ID")
    myMQTTClient.configureEndpoint("a2vv0cnk6n1beh-ats.iot.us-east-1.amazonaws.com", 8883)
    myMQTTClient.configureCredentials("../../Certificates/root-ca.pem", "../../Certificates/private.pem.key", "../../Certificates/certificate.pem.crt")
    myMQTTClient.configureOfflinePublishQueueing(-1)  # Infinite offline Publish queueing
    myMQTTClient.configureDrainingFrequency(2)  # Draining: 2 Hz
    myMQTTClient.configureConnectDisconnectTimeout(10)  # 10 sec
    myMQTTClient.configureMQTTOperationTimeout(5)  # 5 sec
    myMQTTClient.connect()
    print('Initialized Connection between current device and AWS IoT Core...')

    # Read converted text from user.
    # BUG FIX: the original called f.readlines() twice; the second call always
    # returned [] because the file position was already at EOF, so indexing
    # [1] raised IndexError whenever speech had actually been detected.
    # Read the file exactly once and reuse the list. The `with` statement
    # closes the file, so no explicit f.close() is needed.
    with open("converted.json") as f:
        lines = f.readlines()
    if len(lines) <= 1:
        print("No voice detected!")
    else:
        # The speech-to-text JSON payload lives on the second line.
        data_json = json.loads(lines[1])
        convText = (data_json['result'][0]['alternative'][0]['transcript'])
        print("\n" + convText)
        password = "networking the physical world"
        if convText == password:
            print("Password matched! Door open..")
            publish_host()
        else:
            print("Wrong password..")
            publish_unknown()
acdea449fc0f69a9813e3e98252394924ada5496 | 102,950 | py | Python | pypureclient/flasharray/FA_2_5/api/volumes_api.py | ashahid-ps/py-pure-client | 2e3565d37b2a41db69308769f6f485d08a7c46c3 | [
"BSD-2-Clause"
] | null | null | null | pypureclient/flasharray/FA_2_5/api/volumes_api.py | ashahid-ps/py-pure-client | 2e3565d37b2a41db69308769f6f485d08a7c46c3 | [
"BSD-2-Clause"
] | null | null | null | pypureclient/flasharray/FA_2_5/api/volumes_api.py | ashahid-ps/py-pure-client | 2e3565d37b2a41db69308769f6f485d08a7c46c3 | [
"BSD-2-Clause"
] | null | null | null | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class VolumesApi(object):
    def __init__(self, api_client):
        """Create a VolumesApi bound to the given ApiClient.

        :param api_client: configured ApiClient used to sign and send every
            request issued by the methods on this class.
        """
        self.api_client = api_client
    def api25_volumes_delete_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        ids=None,  # type: List[str]
        names=None,  # type: List[str]
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> None
        """Delete a volume
        Deletes a volume that has been destroyed and is pending eradication. Eradicated volumes cannot be recovered. Volumes are destroyed using the `PATCH` method. The `ids` or `names` parameter is required, but they cannot be set together.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api25_volumes_delete_with_http_info(async_req=True)
        >>> result = thread.get()
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
        :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                It can also be a tuple of (connection time, read time) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Normalize scalar ids/names into single-element lists so they
        # serialize uniformly as comma-separated query values.
        if ids is not None:
            if not isinstance(ids, list):
                ids = [ids]
        if names is not None:
            if not isinstance(names, list):
                names = [names]
        # Snapshot every non-None argument by name (locals() also captures
        # `self`); the lookups below are keyed on parameter name.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        # NOTE(review): this endpoint has no `filter`/`sort` parameters, so
        # both conversions below can never fire; they are inert generator
        # template code kept for consistency with the other methods.
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        collection_formats = {}
        path_params = {}
        query_params = []
        # 'csv' marks these list parameters for comma-separated serialization
        # (matching the comma-separated format described in the docstring).
        if 'ids' in params:
            query_params.append(('ids', params['ids']))
            collection_formats['ids'] = 'csv'
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting (none registered here; the bearer token is
        # passed explicitly via the Authorization header above).
        auth_settings = []
        # response_type=None: a successful DELETE returns no body to decode.
        return self.api_client.call_api(
            '/api/2.5/volumes', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
    def api25_volumes_get_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        continuation_token=None,  # type: str
        destroyed=None,  # type: bool
        filter=None,  # type: str
        ids=None,  # type: List[str]
        limit=None,  # type: int
        names=None,  # type: List[str]
        offset=None,  # type: int
        sort=None,  # type: List[str]
        total_item_count=None,  # type: bool
        total_only=None,  # type: bool
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.VolumeGetResponse
        """List volumes
        Displays a list of volumes, including those pending eradication.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api25_volumes_get_with_http_info(async_req=True)
        >>> result = thread.get()
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
        :param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds.
        :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
        :param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
        :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
        :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
        :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
        :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
        :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
        :param bool total_only: If set to `true`, returns the aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                It can also be a tuple of (connection time, read time) timeouts.
        :return: VolumeGetResponse
                 If the method is called asynchronously,
                 returns the request thread.
        :raises ValueError: If `limit` is less than 1 or `offset` is negative
                 (validated client-side before the request is sent).
        """
        # Normalize scalar ids/names/sort into single-element lists so they
        # serialize uniformly as comma-separated query values.
        if ids is not None:
            if not isinstance(ids, list):
                ids = [ids]
        if names is not None:
            if not isinstance(names, list):
                names = [names]
        if sort is not None:
            if not isinstance(sort, list):
                sort = [sort]
        # Snapshot every non-None argument by name (locals() also captures
        # `self`); the lookups below are keyed on parameter name.
        # NOTE: the `filter` parameter shadows the builtin; its name is fixed
        # by the public API surface.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # Client-side validation of the pagination bounds.
        if 'limit' in params and params['limit'] < 1:
            raise ValueError("Invalid value for parameter `limit` when calling `api25_volumes_get`, must be a value greater than or equal to `1`")
        if 'offset' in params and params['offset'] < 0:
            raise ValueError("Invalid value for parameter `offset` when calling `api25_volumes_get`, must be a value greater than or equal to `0`")
        collection_formats = {}
        path_params = {}
        query_params = []
        # 'csv' marks list parameters for comma-separated serialization
        # (matching the comma-separated format described in the docstring).
        if 'continuation_token' in params:
            query_params.append(('continuation_token', params['continuation_token']))
        if 'destroyed' in params:
            query_params.append(('destroyed', params['destroyed']))
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'ids' in params:
            query_params.append(('ids', params['ids']))
            collection_formats['ids'] = 'csv'
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
            collection_formats['sort'] = 'csv'
        if 'total_item_count' in params:
            query_params.append(('total_item_count', params['total_item_count']))
        if 'total_only' in params:
            query_params.append(('total_only', params['total_only']))
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting (none registered here; the bearer token is
        # passed explicitly via the Authorization header above).
        auth_settings = []
        return self.api_client.call_api(
            '/api/2.5/volumes', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='VolumeGetResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
    def api25_volumes_patch_with_http_info(
        self,
        volume=None,  # type: models.VolumePatch
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        ids=None,  # type: List[str]
        names=None,  # type: List[str]
        truncate=None,  # type: bool
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.VolumeResponse
        """Update a volume
        Updates a volume, renaming, destroying, or resizing it. To rename a volume, set `name` to the new name. To destroy a volume, set `destroyed=true`. To recover a volume that has been destroyed and is pending eradication, set `destroyed=false`. Sets the bandwidth and IOPs limits of a volume through the respective `bandwidth_limit` and `iops_limit` parameter. Moves the volume into a pod or volume group through the respective `pod` or `volume_group` parameter. The `ids` or `names` parameter is required, but cannot be set together.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api25_volumes_patch_with_http_info(volume, async_req=True)
        >>> result = thread.get()
        :param VolumePatch volume: (required)
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
        :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
        :param bool truncate: If set to `true`, reduces the size of a volume during a volume resize operation. When a volume is truncated, Purity automatically takes an undo snapshot, providing a 24-hour window during which the previous contents can be retrieved. After truncating a volume, its provisioned size can be subsequently increased, but the data in truncated sectors cannot be retrieved. If set to `false` or not set at all and the volume is being reduced in size, the volume copy operation fails. Required if the `provisioned` parameter is set to a volume size that is smaller than the original size.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                It can also be a tuple of (connection time, read time) timeouts.
        :return: VolumeResponse
                 If the method is called asynchronously,
                 returns the request thread.
        :raises TypeError: If the required `volume` request body is not supplied.
        """
        # Normalize scalar ids/names into single-element lists so they
        # serialize uniformly as comma-separated query values.
        if ids is not None:
            if not isinstance(ids, list):
                ids = [ids]
        if names is not None:
            if not isinstance(names, list):
                names = [names]
        # Snapshot every non-None argument by name (locals() also captures
        # `self`); the lookups below are keyed on parameter name.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        # NOTE(review): this endpoint has no `filter`/`sort` parameters, so
        # both conversions below can never fire; they are inert generator
        # template code kept for consistency with the other methods.
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # verify the required parameter 'volume' is set
        if volume is None:
            raise TypeError("Missing the required parameter `volume` when calling `api25_volumes_patch`")
        collection_formats = {}
        path_params = {}
        query_params = []
        # 'csv' marks these list parameters for comma-separated serialization
        # (matching the comma-separated format described in the docstring).
        if 'ids' in params:
            query_params.append(('ids', params['ids']))
            collection_formats['ids'] = 'csv'
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        if 'truncate' in params:
            query_params.append(('truncate', params['truncate']))
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        # The VolumePatch object is sent as the JSON request body.
        body_params = None
        if 'volume' in params:
            body_params = params['volume']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting (none registered here; the bearer token is
        # passed explicitly via the Authorization header above).
        auth_settings = []
        return self.api_client.call_api(
            '/api/2.5/volumes', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='VolumeResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
def api25_volumes_performance_by_array_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
destroyed=None, # type: bool
filter=None, # type: str
end_time=None, # type: int
resolution=None, # type: int
start_time=None, # type: int
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
total_only=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ResourcePerformanceByArrayGetResponse
"""List volume performance data by array
Return real-time and historical performance data, real-time latency data, and average I/O size data. The data returned is for each volume on the current array and for each volume on any remote arrays that are visible to the current array. The data is grouped by individual volumes and as a total across all volumes on each array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_performance_by_array_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int end_time: Displays historical performance data for the specified time window, where `start_time` is the beginning of the time window, and `end_time` is the end of the time window. The `start_time` and `end_time` parameters are specified in milliseconds since the UNIX epoch. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. If `end_time`is not specified, the end time will default to the current time. Include the `resolution` parameter to display the performance data at the specified resolution. If not specified, `resolution` defaults to the lowest valid resolution.
:param int resolution: The number of milliseconds between samples of historical data. For array-wide performance metrics (`/arrays/performance` endpoint), valid values are `1000` (1 second), `30000` (30 seconds), `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). For performance metrics on storage objects (`<object name>/performance` endpoint), such as volumes, valid values are `30000` (30 seconds), `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). For space metrics, (`<object name>/space` endpoint), valid values are `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). Include the `start_time` parameter to display the performance data starting at the specified start time. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. Include the `end_time` parameter to display the performance data until the specified end time. If `end_time`is not specified, the end time will default to the current time. If the `resolution` parameter is not specified but either the `start_time` or `end_time` parameter is, then `resolution` will default to the lowest valid resolution.
:param int start_time: Displays historical performance data for the specified time window, where `start_time` is the beginning of the time window, and `end_time` is the end of the time window. The `start_time` and `end_time` parameters are specified in milliseconds since the UNIX epoch. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. If `end_time`is not specified, the end time will default to the current time. Include the `resolution` parameter to display the performance data at the specified resolution. If not specified, `resolution` defaults to the lowest valid resolution.
:param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool total_only: If set to `true`, returns the aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ResourcePerformanceByArrayGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'resolution' in params and params['resolution'] < 0:
raise ValueError("Invalid value for parameter `resolution` when calling `api25_volumes_performance_by_array_get`, must be a value greater than or equal to `0`")
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api25_volumes_performance_by_array_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api25_volumes_performance_by_array_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'destroyed' in params:
query_params.append(('destroyed', params['destroyed']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes/performance/by-array', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourcePerformanceByArrayGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api25_volumes_performance_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
destroyed=None, # type: bool
filter=None, # type: str
end_time=None, # type: int
resolution=None, # type: int
start_time=None, # type: int
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
total_only=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ResourcePerformanceGetResponse
"""List volume performance data
Returns real-time and historical performance data, real-time latency data, and average I/O sizes for each volume and and as a total of all volumes across the entire array.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_performance_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int end_time: Displays historical performance data for the specified time window, where `start_time` is the beginning of the time window, and `end_time` is the end of the time window. The `start_time` and `end_time` parameters are specified in milliseconds since the UNIX epoch. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. If `end_time`is not specified, the end time will default to the current time. Include the `resolution` parameter to display the performance data at the specified resolution. If not specified, `resolution` defaults to the lowest valid resolution.
:param int resolution: The number of milliseconds between samples of historical data. For array-wide performance metrics (`/arrays/performance` endpoint), valid values are `1000` (1 second), `30000` (30 seconds), `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). For performance metrics on storage objects (`<object name>/performance` endpoint), such as volumes, valid values are `30000` (30 seconds), `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). For space metrics, (`<object name>/space` endpoint), valid values are `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). Include the `start_time` parameter to display the performance data starting at the specified start time. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. Include the `end_time` parameter to display the performance data until the specified end time. If `end_time`is not specified, the end time will default to the current time. If the `resolution` parameter is not specified but either the `start_time` or `end_time` parameter is, then `resolution` will default to the lowest valid resolution.
:param int start_time: Displays historical performance data for the specified time window, where `start_time` is the beginning of the time window, and `end_time` is the end of the time window. The `start_time` and `end_time` parameters are specified in milliseconds since the UNIX epoch. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. If `end_time`is not specified, the end time will default to the current time. Include the `resolution` parameter to display the performance data at the specified resolution. If not specified, `resolution` defaults to the lowest valid resolution.
:param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool total_only: If set to `true`, returns the aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ResourcePerformanceGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if names is not None:
if not isinstance(names, list):
names = [names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'resolution' in params and params['resolution'] < 0:
raise ValueError("Invalid value for parameter `resolution` when calling `api25_volumes_performance_get`, must be a value greater than or equal to `0`")
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api25_volumes_performance_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api25_volumes_performance_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'destroyed' in params:
query_params.append(('destroyed', params['destroyed']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes/performance', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourcePerformanceGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api25_volumes_post_with_http_info(
self,
volume=None, # type: models.VolumePost
authorization=None, # type: str
x_request_id=None, # type: str
names=None, # type: List[str]
overwrite=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.VolumeResponse
"""Create a volume
Creates one or more virtual storage volumes of the specified size. If `provisioned` is not specified, the size of the new volume defaults to 1 MB in size. The `names` query parameter is required.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_post_with_http_info(volume, async_req=True)
>>> result = thread.get()
:param VolumePost volume: (required)
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool overwrite: If set to `true`, overwrites an existing volume during a volume copy operation. If set to `false` or not set at all and the target name is an existing volume, the volume copy operation fails. Required if the `source: id` or `source: name` body parameter is set and the source overwrites an existing volume during the volume copy operation.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: VolumeResponse
If the method is called asynchronously,
returns the request thread.
"""
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'volume' is set
if volume is None:
raise TypeError("Missing the required parameter `volume` when calling `api25_volumes_post`")
collection_formats = {}
path_params = {}
query_params = []
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
if 'overwrite' in params:
query_params.append(('overwrite', params['overwrite']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'volume' in params:
body_params = params['volume']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='VolumeResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api25_volumes_protection_groups_delete_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
group_names=None, # type: List[str]
member_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Remove a volume from a protection group
Removes a volume member from a protection group. After the member has been removed, it is no longer protected by the group. Any protection group snapshots that were taken before the member was removed will not be affected. Removing a member from a protection group does not delete the member from the array, and the member can be added back to the protection group at any time. The `group_names` parameter represents the name of the protection group, and the `member_names` parameter represents the name of the volume. The `group_names` and `member_names` parameters are required and must be set together.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_protection_groups_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes/protection-groups', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api25_volumes_protection_groups_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
continuation_token=None, # type: str
filter=None, # type: str
group_names=None, # type: List[str]
limit=None, # type: int
member_names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.MemberNoIdAllGetResponse
"""List volumes that are members of protection groups
Returns a list of volume members that belong to one or more protection groups.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_protection_groups_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: MemberNoIdAllGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api25_volumes_protection_groups_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api25_volumes_protection_groups_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'continuation_token' in params:
query_params.append(('continuation_token', params['continuation_token']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes/protection-groups', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MemberNoIdAllGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api25_volumes_protection_groups_post_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
group_names=None, # type: List[str]
member_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.MemberNoIdAllResponse
"""Add a volume to a protection group
Adds a volume member to a protection group. Members that are already in the protection group are not affected. For asynchronous replication, only members of the same type can belong to a protection group. The `group_names` parameter represents the name of the protection group, and the `member_names` parameter represents the name of the volume. The `group_names` and `member_names` parameters are required and must be set together.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_protection_groups_post_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] group_names: Performs the operation on the unique group name specified. Examples of groups include host groups, pods, protection groups, and volume groups. Enter multiple names in comma-separated format. For example, `hgroup01,hgroup02`.
:param list[str] member_names: Performs the operation on the unique member name specified. Examples of members include volumes, hosts, host groups, and directories. Enter multiple names in comma-separated format. For example, `vol01,vol02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: MemberNoIdAllResponse
If the method is called asynchronously,
returns the request thread.
"""
if group_names is not None:
if not isinstance(group_names, list):
group_names = [group_names]
if member_names is not None:
if not isinstance(member_names, list):
member_names = [member_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'group_names' in params:
query_params.append(('group_names', params['group_names']))
collection_formats['group_names'] = 'csv'
if 'member_names' in params:
query_params.append(('member_names', params['member_names']))
collection_formats['member_names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes/protection-groups', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='MemberNoIdAllResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api25_volumes_space_get_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
destroyed=None, # type: bool
filter=None, # type: str
end_time=None, # type: int
resolution=None, # type: int
start_time=None, # type: int
ids=None, # type: List[str]
limit=None, # type: int
offset=None, # type: int
sort=None, # type: List[str]
total_item_count=None, # type: bool
total_only=None, # type: bool
names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ResourceSpaceGetResponse
"""List volume space information
Returns the provisioned size and physical storage consumption data for each volume.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_space_get_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds.
:param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
:param int end_time: Displays historical performance data for the specified time window, where `start_time` is the beginning of the time window, and `end_time` is the end of the time window. The `start_time` and `end_time` parameters are specified in milliseconds since the UNIX epoch. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. If `end_time`is not specified, the end time will default to the current time. Include the `resolution` parameter to display the performance data at the specified resolution. If not specified, `resolution` defaults to the lowest valid resolution.
:param int resolution: The number of milliseconds between samples of historical data. For array-wide performance metrics (`/arrays/performance` endpoint), valid values are `1000` (1 second), `30000` (30 seconds), `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). For performance metrics on storage objects (`<object name>/performance` endpoint), such as volumes, valid values are `30000` (30 seconds), `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). For space metrics, (`<object name>/space` endpoint), valid values are `300000` (5 minutes), `1800000` (30 minutes), `7200000` (2 hours), `28800000` (8 hours), and `86400000` (24 hours). Include the `start_time` parameter to display the performance data starting at the specified start time. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. Include the `end_time` parameter to display the performance data until the specified end time. If `end_time`is not specified, the end time will default to the current time. If the `resolution` parameter is not specified but either the `start_time` or `end_time` parameter is, then `resolution` will default to the lowest valid resolution.
:param int start_time: Displays historical performance data for the specified time window, where `start_time` is the beginning of the time window, and `end_time` is the end of the time window. The `start_time` and `end_time` parameters are specified in milliseconds since the UNIX epoch. If `start_time` is not specified, the start time will default to one resolution before the end time, meaning that the most recent sample of performance data will be displayed. If `end_time`is not specified, the end time will default to the current time. Include the `resolution` parameter to display the performance data at the specified resolution. If not specified, `resolution` defaults to the lowest valid resolution.
:param list[str] ids: Performs the operation on the unique resource IDs specified. Enter multiple resource IDs in comma-separated format. The `ids` and `names` parameters cannot be provided together.
:param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
:param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
:param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
:param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
:param bool total_only: If set to `true`, returns the aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
:param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: ResourceSpaceGetResponse
If the method is called asynchronously,
returns the request thread.
"""
if ids is not None:
if not isinstance(ids, list):
ids = [ids]
if sort is not None:
if not isinstance(sort, list):
sort = [sort]
if names is not None:
if not isinstance(names, list):
names = [names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
if 'resolution' in params and params['resolution'] < 0:
raise ValueError("Invalid value for parameter `resolution` when calling `api25_volumes_space_get`, must be a value greater than or equal to `0`")
if 'limit' in params and params['limit'] < 1:
raise ValueError("Invalid value for parameter `limit` when calling `api25_volumes_space_get`, must be a value greater than or equal to `1`")
if 'offset' in params and params['offset'] < 0:
raise ValueError("Invalid value for parameter `offset` when calling `api25_volumes_space_get`, must be a value greater than or equal to `0`")
collection_formats = {}
path_params = {}
query_params = []
if 'destroyed' in params:
query_params.append(('destroyed', params['destroyed']))
if 'filter' in params:
query_params.append(('filter', params['filter']))
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'resolution' in params:
query_params.append(('resolution', params['resolution']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
if 'ids' in params:
query_params.append(('ids', params['ids']))
collection_formats['ids'] = 'csv'
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'offset' in params:
query_params.append(('offset', params['offset']))
if 'sort' in params:
query_params.append(('sort', params['sort']))
collection_formats['sort'] = 'csv'
if 'total_item_count' in params:
query_params.append(('total_item_count', params['total_item_count']))
if 'total_only' in params:
query_params.append(('total_only', params['total_only']))
if 'names' in params:
query_params.append(('names', params['names']))
collection_formats['names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes/space', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourceSpaceGetResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api25_volumes_tags_batch_put_with_http_info(
self,
tag=None, # type: List[models.Tag]
authorization=None, # type: str
x_request_id=None, # type: str
resource_ids=None, # type: List[str]
resource_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.TagResponse
"""Update tags
Updates tags.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_tags_batch_put_with_http_info(tag, async_req=True)
>>> result = thread.get()
:param list[Tag] tag: A list of tags to be created or, if already existing, updated. (required)
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] resource_ids: A comma-separated list of resource IDs. The `resource_ids` and `resource_names` parameters cannot be provided together.
:param list[str] resource_names: A comma-separated list of resource names. The `resource_ids` and `resource_names` parameters cannot be provided together.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: TagResponse
If the method is called asynchronously,
returns the request thread.
"""
if tag is not None:
if not isinstance(tag, list):
tag = [tag]
if resource_ids is not None:
if not isinstance(resource_ids, list):
resource_ids = [resource_ids]
if resource_names is not None:
if not isinstance(resource_names, list):
resource_names = [resource_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'tag' is set
if tag is None:
raise TypeError("Missing the required parameter `tag` when calling `api25_volumes_tags_batch_put`")
collection_formats = {}
path_params = {}
query_params = []
if 'resource_ids' in params:
query_params.append(('resource_ids', params['resource_ids']))
collection_formats['resource_ids'] = 'csv'
if 'resource_names' in params:
query_params.append(('resource_names', params['resource_names']))
collection_formats['resource_names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
if 'tag' in params:
body_params = params['tag']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes/tags/batch', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TagResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api25_volumes_tags_delete_with_http_info(
self,
authorization=None, # type: str
x_request_id=None, # type: str
keys=None, # type: List[str]
namespaces=None, # type: List[str]
resource_ids=None, # type: List[str]
resource_names=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Delete tags
Deletes specified tags on volume objects.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api25_volumes_tags_delete_with_http_info(async_req=True)
>>> result = thread.get()
:param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
:param str x_request_id: Supplied by client during request or generated by server.
:param list[str] keys: A comma-separated list of tag keys.
:param list[str] namespaces: A comma-separated list of namespaces.
:param list[str] resource_ids: A comma-separated list of resource IDs. The `resource_ids` and `resource_names` parameters cannot be provided together.
:param list[str] resource_names: A comma-separated list of resource names. The `resource_ids` and `resource_names` parameters cannot be provided together.
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
if keys is not None:
if not isinstance(keys, list):
keys = [keys]
if namespaces is not None:
if not isinstance(namespaces, list):
namespaces = [namespaces]
if resource_ids is not None:
if not isinstance(resource_ids, list):
resource_ids = [resource_ids]
if resource_names is not None:
if not isinstance(resource_names, list):
resource_names = [resource_names]
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
if 'keys' in params:
query_params.append(('keys', params['keys']))
collection_formats['keys'] = 'csv'
if 'namespaces' in params:
query_params.append(('namespaces', params['namespaces']))
collection_formats['namespaces'] = 'csv'
if 'resource_ids' in params:
query_params.append(('resource_ids', params['resource_ids']))
collection_formats['resource_ids'] = 'csv'
if 'resource_names' in params:
query_params.append(('resource_names', params['resource_names']))
collection_formats['resource_names'] = 'csv'
header_params = {}
if 'authorization' in params:
header_params['Authorization'] = params['authorization']
if 'x_request_id' in params:
header_params['X-Request-ID'] = params['x_request_id']
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(
'/api/2.5/volumes/tags', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
    def api25_volumes_tags_get_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        continuation_token=None,  # type: str
        filter=None,  # type: str
        limit=None,  # type: int
        namespaces=None,  # type: List[str]
        offset=None,  # type: int
        resource_destroyed=None,  # type: bool
        resource_ids=None,  # type: List[str]
        resource_names=None,  # type: List[str]
        sort=None,  # type: List[str]
        total_item_count=None,  # type: bool
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.TagGetResponse
        """List tags

        Displays the list of tags on volume objects.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.api25_volumes_tags_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
        :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
        :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
        :param list[str] namespaces: A comma-separated list of namespaces.
        :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
        :param bool resource_destroyed: If set to `true`, returns only objects from destroyed resources. If set to `false`, returns only objects from live resources.
        :param list[str] resource_ids: A comma-separated list of resource IDs. The `resource_ids` and `resource_names` parameters cannot be provided together.
        :param list[str] resource_names: A comma-separated list of resource names. The `resource_ids` and `resource_names` parameters cannot be provided together.
        :param list[str] sort: Returns the response objects in the order specified. Append the minus sign (`-`) to a name to sort in descending order.
        :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                 It can also be a tuple of (connection time, read time) timeouts.
        :return: TagGetResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Coerce scalar arguments into single-element lists so that they can be
        # serialized with the 'csv' collection format below.
        if namespaces is not None:
            if not isinstance(namespaces, list):
                namespaces = [namespaces]
        if resource_ids is not None:
            if not isinstance(resource_ids, list):
                resource_ids = [resource_ids]
        if resource_names is not None:
            if not isinstance(resource_names, list):
                resource_names = [resource_names]
        if sort is not None:
            if not isinstance(sort, list):
                sort = [sort]
        # Snapshot all non-None call arguments. NOTE: this relies on locals(),
        # so it must stay before any other local variables are introduced.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # Client-side validation of the pagination bounds before issuing the call.
        if 'limit' in params and params['limit'] < 1:
            raise ValueError("Invalid value for parameter `limit` when calling `api25_volumes_tags_get`, must be a value greater than or equal to `1`")
        if 'offset' in params and params['offset'] < 0:
            raise ValueError("Invalid value for parameter `offset` when calling `api25_volumes_tags_get`, must be a value greater than or equal to `0`")
        collection_formats = {}
        path_params = {}

        # Build the query string; list-valued parameters are joined as csv.
        query_params = []
        if 'continuation_token' in params:
            query_params.append(('continuation_token', params['continuation_token']))
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'namespaces' in params:
            query_params.append(('namespaces', params['namespaces']))
            collection_formats['namespaces'] = 'csv'
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'resource_destroyed' in params:
            query_params.append(('resource_destroyed', params['resource_destroyed']))
        if 'resource_ids' in params:
            query_params.append(('resource_ids', params['resource_ids']))
            collection_formats['resource_ids'] = 'csv'
        if 'resource_names' in params:
            query_params.append(('resource_names', params['resource_names']))
            collection_formats['resource_names'] = 'csv'
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
            collection_formats['sort'] = 'csv'
        if 'total_item_count' in params:
            query_params.append(('total_item_count', params['total_item_count']))

        # Auth token and request-correlation headers.
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']

        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            '/api/2.5/volumes/tags', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='TagGetResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
    def api25_volumes_volume_groups_get_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        continuation_token=None,  # type: str
        filter=None,  # type: str
        group_ids=None,  # type: List[str]
        limit=None,  # type: int
        member_ids=None,  # type: List[str]
        offset=None,  # type: int
        sort=None,  # type: List[str]
        total_item_count=None,  # type: bool
        group_names=None,  # type: List[str]
        member_names=None,  # type: List[str]
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.MemberGetResponse
        """List volumes that are in volume groups

        Returns a list of volumes that are in a volume group.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.api25_volumes_volume_groups_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
        :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
        :param list[str] group_ids: A comma-separated list of group IDs.
        :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`.
        :param list[str] member_ids: Performs the operation on the unique member IDs specified. The `member_ids` and `member_names` parameters cannot be provided together.
        :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
        :param list[str] sort: Returns the response objects in the order specified. Append the minus sign (`-`) to a name to sort in descending order.
        :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response.
        :param list[str] group_names: Performs the operation on the unique group name specified. Enter multiple names in comma-separated format.
        :param list[str] member_names: Performs the operation on the unique member name specified. Enter multiple names in comma-separated format.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                 It can also be a tuple of (connection time, read time) timeouts.
        :return: MemberGetResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Coerce scalar arguments into single-element lists so that they can be
        # serialized with the 'csv' collection format below.
        if group_ids is not None:
            if not isinstance(group_ids, list):
                group_ids = [group_ids]
        if member_ids is not None:
            if not isinstance(member_ids, list):
                member_ids = [member_ids]
        if sort is not None:
            if not isinstance(sort, list):
                sort = [sort]
        if group_names is not None:
            if not isinstance(group_names, list):
                group_names = [group_names]
        if member_names is not None:
            if not isinstance(member_names, list):
                member_names = [member_names]
        # Snapshot all non-None call arguments. NOTE: this relies on locals(),
        # so it must stay before any other local variables are introduced.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # Client-side validation of the pagination bounds before issuing the call.
        if 'limit' in params and params['limit'] < 1:
            raise ValueError("Invalid value for parameter `limit` when calling `api25_volumes_volume_groups_get`, must be a value greater than or equal to `1`")
        if 'offset' in params and params['offset'] < 0:
            raise ValueError("Invalid value for parameter `offset` when calling `api25_volumes_volume_groups_get`, must be a value greater than or equal to `0`")
        collection_formats = {}
        path_params = {}

        # Build the query string; list-valued parameters are joined as csv.
        query_params = []
        if 'continuation_token' in params:
            query_params.append(('continuation_token', params['continuation_token']))
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'group_ids' in params:
            query_params.append(('group_ids', params['group_ids']))
            collection_formats['group_ids'] = 'csv'
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'member_ids' in params:
            query_params.append(('member_ids', params['member_ids']))
            collection_formats['member_ids'] = 'csv'
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
            collection_formats['sort'] = 'csv'
        if 'total_item_count' in params:
            query_params.append(('total_item_count', params['total_item_count']))
        if 'group_names' in params:
            query_params.append(('group_names', params['group_names']))
            collection_formats['group_names'] = 'csv'
        if 'member_names' in params:
            query_params.append(('member_names', params['member_names']))
            collection_formats['member_names'] = 'csv'

        # Auth token and request-correlation headers.
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']

        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []

        return self.api_client.call_api(
            '/api/2.5/volumes/volume-groups', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='MemberGetResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
| 59.646582 | 1,397 | 0.66373 |
acdea45d80e084821b2979251675bca6f2ad2714 | 22,217 | py | Python | Packs/MicrosoftManagementActivity/Integrations/MicrosoftManagementActivity/MicrosoftManagementActivity_test.py | ddi-danielsantander/content | 67e2edc404f50c332d928dbdbce00a447bb5532f | [
"MIT"
] | 2 | 2020-07-27T10:35:41.000Z | 2020-12-14T15:44:18.000Z | Packs/MicrosoftManagementActivity/Integrations/MicrosoftManagementActivity/MicrosoftManagementActivity_test.py | ddi-danielsantander/content | 67e2edc404f50c332d928dbdbce00a447bb5532f | [
"MIT"
] | 48 | 2022-03-08T13:45:00.000Z | 2022-03-31T14:32:05.000Z | Packs/MicrosoftManagementActivity/Integrations/MicrosoftManagementActivity/MicrosoftManagementActivity_test.py | ddi-danielsantander/content | 67e2edc404f50c332d928dbdbce00a447bb5532f | [
"MIT"
] | 2 | 2020-12-10T12:02:45.000Z | 2020-12-15T09:20:01.000Z | from CommonServerPython import *
import pytest
from datetime import datetime, timedelta
''' MOCK DATA AND RESPONSES '''
# Timestamp format used by the integration for all fetch bookkeeping.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'

# Reference points relative to test start, both as datetime objects and as
# DATE_FORMAT strings, used to build fetch windows and mock record times.
TIME_ONE_MINUTE_AGO_DATETIME = datetime.now() - timedelta(minutes=1)
TIME_ONE_MINUTE_AGO_STRING = datetime.strftime(TIME_ONE_MINUTE_AGO_DATETIME, DATE_FORMAT)
TIME_ONE_HOUR_AGO = datetime.now() - timedelta(hours=1)
TIME_ONE_HOUR_AGO_STRING = datetime.strftime(TIME_ONE_HOUR_AGO, DATE_FORMAT)
TIME_6_HOURS_AGO = datetime.now() - timedelta(hours=6)
TIME_6_HOURS_AGO_STRING = datetime.strftime(TIME_6_HOURS_AGO, DATE_FORMAT)
TIME_12_HOURS_AGO = datetime.now() - timedelta(hours=12)
TIME_12_HOURS_AGO_STRING = datetime.strftime(TIME_12_HOURS_AGO, DATE_FORMAT)
TIME_24_HOURS_AGO = datetime.now() - timedelta(hours=24)
TIME_24_HOURS_AGO_STRING = datetime.strftime(TIME_24_HOURS_AGO, DATE_FORMAT)
TIME_48_HOURS_AGO = datetime.now() - timedelta(hours=48)
TIME_48_HOURS_AGO_STRING = datetime.strftime(TIME_48_HOURS_AGO, DATE_FORMAT)

# (last_run, first_fetch_delta, expected start in minutes from now,
#  expected end in minutes from now) cases for test_fetch_times_range.
TEST_FETCH_FIRST_RUN = ({}, TIME_12_HOURS_AGO, 720, 710)
TEST_FETCH_FIRST_RUN_WITH_DELTA_OVER_24_HOURS = ({}, TIME_48_HOURS_AGO, 2880, 2870)
TEST_FETCH_NOT_FIRST_RUN = ({'last_fetch': TIME_6_HOURS_AGO_STRING}, 48, 360, 350)
FETCH_TIMES_TEST_DATA = [
    TEST_FETCH_FIRST_RUN,
    TEST_FETCH_FIRST_RUN_WITH_DELTA_OVER_24_HOURS,
    TEST_FETCH_NOT_FIRST_RUN
]

# Epoch seconds one day before/after now, used as token expiry values.
DATE_YESTERDAY_IN_EPOCH = int((datetime.now() - datetime(1970, 1, 1)).total_seconds()) - 24 * 60 * 60
DATE_TOMORROW_IN_EPOCH = int((datetime.now() - datetime(1970, 1, 1)).total_seconds()) + 24 * 60 * 60
# Mocked response for the "start subscription" endpoint.
START_SUBSCRIPTION_RESPONSE = {
    "contentType": "Audit.AzureActiveDirectory",
    "status": "enabled",
    "webhook": None
}

# Mocked "list subscriptions" responses covering two, one and zero
# enabled subscriptions.
LIST_SUBSCRIPTIONS_RESPONSE_MULTIPLE_SUBSCRIPTIONS = [
    {
        "contentType": "Audit.AzureActiveDirectory",
        "status": "enabled",
        "webhook": None
    },
    {
        "contentType": "audit.general",
        "status": "enabled",
        "webhook": None
    }
]
LIST_SUBSCRIPTIONS_RESPONSE_SINGLE_SUBSCRIPTION = [
    {
        "contentType": "audit.general",
        "status": "enabled",
        "webhook": None
    }
]
LIST_SUBSCRIPTIONS_RESPONSE_NO_SUBSCRIPTIONS = []

# Mocked "list content" response for audit.general: three content blobs
# (test1-test3) created six hours ago.
LIST_CONTENT_AUDIT_GENERAL_RESPONSE = [
    {
        "contentUri": "https://manage.office.com/api/v1.0/test1",
        "contentId": "test1",
        "contentType": "audit.general",
        "contentCreated": TIME_6_HOURS_AGO_STRING,
    },
    {
        "contentUri": "https://manage.office.com/api/v1.0/test2",
        "contentId": "test2",
        "contentType": "audit.general",
        "contentCreated": TIME_6_HOURS_AGO_STRING,
    },
    {
        "contentUri": "https://manage.office.com/api/v1.0/test3",
        "contentId": "test3",
        "contentType": "audit.general",
        "contentCreated": TIME_6_HOURS_AGO_STRING,
    }
]
# The content records behind the three audit.general blobs above
# (Ids 1234, 567, 89); identical except for the Id field.
LIST_CONTENT_AUDIT_GENERAL_RESPONSE_CONTENT_RECORDS_RESPONSE = [
    {
        "CreationTime": "2020-02-27T00:57:40",
        "Id": "1234",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    },
    {
        "CreationTime": "2020-02-27T00:57:40",
        "Id": "567",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    },
    {
        "CreationTime": "2020-02-27T00:57:40",
        "Id": "89",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    }
]
# Mocked "list content" response for Audit.AzureActiveDirectory: three
# content blobs (test4-test6) created six hours ago.
LIST_CONTENT_AZUREACTIVE_RESPONSE = [
    {
        "contentUri": "https://manage.office.com/api/v1.0/test4",
        "contentId": "test4",
        "contentType": "Audit.AzureActiveDirectory",
        "contentCreated": TIME_6_HOURS_AGO_STRING,
    },
    {
        "contentUri": "https://manage.office.com/api/v1.0/test5",
        "contentId": "test5",
        "contentType": "Audit.AzureActiveDirectory",
        "contentCreated": TIME_6_HOURS_AGO_STRING,
    },
    {
        "contentUri": "https://manage.office.com/api/v1.0/test6",
        "contentId": "test6",
        "contentType": "Audit.AzureActiveDirectory",
        "contentCreated": TIME_6_HOURS_AGO_STRING,
    }
]
# An empty "list content" response.
LIST_CONTENT_RESPONSE_NO_DATA = []
# Mocked blob payloads: each blob URI resolves to a single content record.
# The three audit.general responses differ only in the Id field
# (1234 / 567 / 89); the Audit.AzureActiveDirectory one uses Id 5678.
GET_BLOB_DATA_RESPONSE_FOR_AUDIT_GENERAL = [
    {
        "CreationTime": "2020-02-27T00:57:40",
        "Id": "1234",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    }
]
GET_BLOB_DATA_RESPONSE_FOR_AUDIT_GENERAL_SECOND_RESPONSE = [
    {
        "CreationTime": "2020-02-27T00:57:40",
        "Id": "567",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    }
]
GET_BLOB_DATA_RESPONSE_FOR_AUDIT_GENERAL_THIRD_RESPONSE = [
    {
        "CreationTime": "2020-02-27T00:57:40",
        "Id": "89",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    }
]
GET_BLOB_DATA_RESPONSE_FOR_AUDIT_ACTIVEDIRECTORY = [
    {
        "CreationTime": "2020-02-27T00:57:40",
        "Id": "5678",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    }
]
# Content records whose CreationTime is relative to test start; used to
# exercise last_run computation in content_records_to_incidents.
CONTENT_RECORD_CREATED_ONE_HOUR_AGO = [
    {
        "CreationTime": TIME_ONE_HOUR_AGO_STRING,
        "Id": "5678",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    }
]
CONTENT_RECORD_CREATED_48_HOURS_AGO = [
    {
        "CreationTime": TIME_48_HOURS_AGO_STRING,
        "Id": "5678",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    }
]
CONTENT_RECORDS_CREATED_1_AND_6_HOURS_AGO = [
    {
        "CreationTime": TIME_ONE_HOUR_AGO_STRING,
        "Id": "5678",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    },
    {
        "CreationTime": TIME_6_HOURS_AGO_STRING,
        "Id": "5678",
        "Operation": "Test",
        "OrganizationId": "Test1234",
        "RecordType": 25,
        "UserKey": "key1234",
        "UserType": 5,
        "Version": 1234,
        "Workload": "MicrosoftTeams",
        "UserId": "Application",
        "CommunicationType": "Team",
        "Members": [
            {
                "DisplayName": "test",
                "Role": 1,
                "UPN": "test@test.onmicrosoft.com"
            }
        ],
        "TeamGuid": "testGuid",
        "ItemName": "TestTeam",
        "TeamName": "TestTeam"
    }
]
# (content types to fetch, expected combined list-content response) cases.
GET_CONTENT_RECORDS_TEST_DATA = [
    (['audit.general'], LIST_CONTENT_AUDIT_GENERAL_RESPONSE),
    (['audit.AzureActiveDirectory'], LIST_CONTENT_AZUREACTIVE_RESPONSE),
    (['audit.AzureActiveDirectory', 'audit.general'], LIST_CONTENT_AZUREACTIVE_RESPONSE
     + LIST_CONTENT_AUDIT_GENERAL_RESPONSE)]

# (content records, expected last_run string) cases for
# test_content_records_to_incidents_last_run.
TEST_LAST_RUN_UPDATE_DATA = [
    ([], datetime.strftime(datetime.now(), DATE_FORMAT)),
    (CONTENT_RECORD_CREATED_48_HOURS_AGO, datetime.strftime(datetime.now(), DATE_FORMAT)),
    (CONTENT_RECORD_CREATED_ONE_HOUR_AGO, TIME_ONE_HOUR_AGO_STRING),
    (CONTENT_RECORDS_CREATED_1_AND_6_HOURS_AGO, TIME_ONE_HOUR_AGO_STRING)
]

# Mocked OAuth token response returned by the AAD token endpoint.
GET_ACCESS_TOKEN_RESPONSE = {
    "token_type": "Bearer",
    "scope": "ActivityFeed.Read ActivityFeed.ReadDlp ActivityReports.Read ServiceHealth.Read ThreatIntelligence.Read",
    "expires_in": "3599",
    "ext_expires_in": "3599",
    "expires_on": "1582793586",
    "not_before": "1582789686",
    "resource": "https://manage.office.com",
    "access_token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
                    "eyJ0aWQiOiIxMjM0NTY3ODkwIiwiZXhwIjoxNTgyN"
                    "zkzNTg2fQ.-p8gaG2vG90SHCvrDSratgPv-Bfti4iF2YTZ9AvIeJY",
    "refresh_token": "refresh"
}

# Integration-context states: no prior run, an expired token (expiry in the
# past) and an active token (expiry in the future).
FIRST_RUN = {}
EXPIRED_TOKEN = {
    'expires_on': str(DATE_YESTERDAY_IN_EPOCH),
    'refresh_token': 'refresh',
    'access_token': 'access'
}
ACTIVE_TOKEN = {
    'expires_on': str(DATE_TOMORROW_IN_EPOCH),
    'refresh_token': 'refresh',
    'access_token': 'access'
}
''' HELPER FUNCTIONS '''
def is_time_in_expected_delta(actual_time, expected_time_delta):
    """Return True if ``actual_time`` falls inside the one-minute window ending
    ``expected_time_delta`` minutes before now (inclusive at both ends)."""
    window_end = datetime.now() - timedelta(minutes=expected_time_delta)
    window_start = window_end - timedelta(minutes=1)
    return window_start <= actual_time <= window_end
def are_dates_approximately_equal(date_a, date_b):
    """Return True when the two DATE_FORMAT timestamp strings are at most one
    minute apart, in either direction (bounds inclusive)."""
    parsed_a = datetime.strptime(date_a, DATE_FORMAT)
    parsed_b = datetime.strptime(date_b, DATE_FORMAT)
    difference = parsed_a - parsed_b
    # Equivalent to the original pairwise window checks: |a - b| <= 1 minute.
    return timedelta(minutes=-1) <= difference <= timedelta(minutes=1)
def http_return_data(method, url_suffix, full_url, headers, json_data):
    """Echo back ``json_data`` unchanged; stands in for an HTTP call in tests."""
    return json_data
def create_client():
    """Build a self-deployed MicrosoftManagementActivity Client with the
    standard test tenant/credentials, honoring insecure/proxy demisto params."""
    from MicrosoftManagementActivity import Client
    integration_params = demisto.params()
    return Client(
        'https://manage.office.com/api/v1.0/',
        verify=not integration_params.get('insecure', False),
        proxy=integration_params.get('proxy', False),
        self_deployed=True,
        auth_and_token_url="test",
        refresh_token="test",
        enc_key="test",
        auth_code="test",
        tenant_id="test",
    )
''' TESTS '''
@pytest.mark.parametrize('last_run, first_fetch_delta, expected_start_time_in_hours_from_now, '
                         'expected_end_time_in_hours_from_now', FETCH_TIMES_TEST_DATA)
def test_fetch_times_range(last_run, first_fetch_delta, expected_start_time_in_hours_from_now,
                           expected_end_time_in_hours_from_now):
    """The fetch window bounds should land in the expected one-minute windows."""
    from MicrosoftManagementActivity import get_fetch_start_and_end_time
    start_str, end_str = get_fetch_start_and_end_time(last_run, first_fetch_delta)
    assert is_time_in_expected_delta(datetime.strptime(end_str, DATE_FORMAT),
                                     expected_end_time_in_hours_from_now)
    assert is_time_in_expected_delta(datetime.strptime(start_str, DATE_FORMAT),
                                     expected_start_time_in_hours_from_now)
# (demisto params, expected content types) cases for
# test_get_content_types_to_fetch: with no configured types, both enabled
# subscriptions from the mocked list response are expected.
TEST_NO_SUBSCRIPTIONS_SPECIFIED = ({}, ["audit.general", "Audit.AzureActiveDirectory"])
TEST_SUBSCRIPTIONS_SPECIFIED = ({"content_types_to_fetch": ["audit.general"]}, ["audit.general"])
@pytest.mark.parametrize('demisto_params, expected_output', [TEST_NO_SUBSCRIPTIONS_SPECIFIED,
                                                             TEST_SUBSCRIPTIONS_SPECIFIED])
def test_get_content_types_to_fetch(mocker, requests_mock, demisto_params, expected_output):
    """Configured content types (or all enabled subscriptions) should be fetched."""
    from MicrosoftManagementActivity import get_content_types_to_fetch
    client = create_client()
    set_requests_mock(client, requests_mock)
    mocker.patch.object(demisto, 'params', return_value=demisto_params)
    actual_content_types = get_content_types_to_fetch(client)
    assert set(actual_content_types) == set(expected_output)
def test_content_records_to_incidents_records_creation():
    """A content record should map to an incident with the expected name and
    'occurred' timestamp."""
    from MicrosoftManagementActivity import content_records_to_incidents
    end_time = datetime.strftime(datetime.now(), DATE_FORMAT)
    incidents, _ = content_records_to_incidents(GET_BLOB_DATA_RESPONSE_FOR_AUDIT_GENERAL,
                                                TIME_6_HOURS_AGO_STRING, end_time)
    first_incident = incidents[0]
    assert first_incident.get('name') == 'Microsoft Management Activity: 1234'
    assert first_incident.get('occurred') == '2020-02-27T00:57:40Z'
@pytest.mark.parametrize('content_records, expected_last_run', TEST_LAST_RUN_UPDATE_DATA)
def test_content_records_to_incidents_last_run(content_records, expected_last_run):
    """last_run should track the newest in-window record creation time, or the
    fetch end time when no record qualifies."""
    from MicrosoftManagementActivity import content_records_to_incidents
    fetch_end = datetime.now()
    end_time = datetime.strftime(fetch_end, DATE_FORMAT)
    start_time = datetime.strftime(fetch_end - timedelta(hours=24), DATE_FORMAT)
    _, last_run = content_records_to_incidents(content_records, start_time, end_time)
    assert are_dates_approximately_equal(last_run, expected_last_run)
def test_fetch_incidents_flow(mocker, requests_mock):
    """End-to-end fetch against the mocked endpoints should yield one incident
    per audit.general blob, all named after record Id 1234."""
    from MicrosoftManagementActivity import fetch_incidents
    client = create_client()
    set_requests_mock(client, requests_mock)
    mocker.patch.object(demisto, 'params',
                        return_value={"content_types_to_fetch": "audit.general"})
    next_run, incidents = fetch_incidents(client, {}, TIME_24_HOURS_AGO)
    expected_name = "Microsoft Management Activity: 1234"
    assert [incident["name"] for incident in incidents] == [expected_name] * 3
@pytest.mark.parametrize("command", ["start", "stop"])
def test_start_and_stop_subscription(requests_mock, command):
    """Smoke test: the command should hit only the mocked start/stop endpoints.

    No assertions — requests_mock raises if an unregistered URL is called.
    """
    from MicrosoftManagementActivity import start_or_stop_subscription_command
    client = create_client()
    set_requests_mock(client, requests_mock)
    start_or_stop_subscription_command(client, {'content_type': 'audit.general'}, command)
def test_list_subscriptions(requests_mock):
    """Smoke test: listing subscriptions should match the mocked endpoint.

    No assertions — requests_mock raises if an unregistered URL is called.
    """
    from MicrosoftManagementActivity import list_subscriptions_command
    client = create_client()
    set_requests_mock(client, requests_mock)
    list_subscriptions_command(client)
def test_get_all_content_type_records(requests_mock):
    """All records behind the audit.general blobs (test1-test3) should be
    collected into a single list."""
    from MicrosoftManagementActivity import get_all_content_type_records
    client = create_client()
    mock_list_content(requests_mock)
    blob_responses = {
        "https://manage.office.com/api/v1.0/test1": GET_BLOB_DATA_RESPONSE_FOR_AUDIT_GENERAL,
        "https://manage.office.com/api/v1.0/test2": GET_BLOB_DATA_RESPONSE_FOR_AUDIT_GENERAL_SECOND_RESPONSE,
        "https://manage.office.com/api/v1.0/test3": GET_BLOB_DATA_RESPONSE_FOR_AUDIT_GENERAL_THIRD_RESPONSE,
    }
    for blob_uri, blob_response in blob_responses.items():
        requests_mock.get(blob_uri, json=blob_response)
    content_records = get_all_content_type_records(client, "audit.general",
                                                   TIME_24_HOURS_AGO, TIME_ONE_MINUTE_AGO_STRING)
    assert {record['Id'] for record in content_records} == {"1234", "567", "89"}
def mock_get_access_token(requests_mock, access_token_resp):
    """Register the AAD token endpoint to return ``access_token_resp``."""
    token_endpoint = 'https://login.windows.net/common/oauth2/token'
    requests_mock.post(token_endpoint, json=access_token_resp)
def mock_start_subscription(requests_mock, client, start_subscription_resp):
start_subscription_endpoint = 'https://manage.office.com/api/v1.0/{}/activity/feed/subscriptions/' \
'start'.format(client.tenant_id)
requests_mock.post(start_subscription_endpoint, json=start_subscription_resp)
def mock_stop_subscription(requests_mock, client):
stop_subscription_endpoint = 'https://manage.office.com/api/v1.0/{}/activity/feed/subscriptions/' \
'stop'.format(client.tenant_id)
requests_mock.post(stop_subscription_endpoint, json={})
def mock_list_subscriptions(requests_mock, client, list_subscriptions_resp):
list_subscriptions_endpoint = 'https://manage.office.com/api/v1.0/{}/activity/feed/subscriptions/list'.format(
client.tenant_id)
requests_mock.get(list_subscriptions_endpoint, json=list_subscriptions_resp)
def mock_list_content(requests_mock):
list_audit_general_content_endpoint = "https://manage.office.com/api/v1.0/test/activity/feed/subscriptions" \
"/content?contentType=audit.general"
requests_mock.get(list_audit_general_content_endpoint, json=LIST_CONTENT_AUDIT_GENERAL_RESPONSE)
list_audit_general_content_endpoint = "https://manage.office.com/api/v1.0/test/activity/feed/subscriptions" \
"/content?contentType=audit.AzureActiveDirectory"
requests_mock.get(list_audit_general_content_endpoint, json=LIST_CONTENT_AZUREACTIVE_RESPONSE)
def mock_get_blob_data(requests_mock):
test_blob_uri = "https://manage.office.com/api/v1.0/test{}"
for i in range(1, 7):
current_endpoint = test_blob_uri.format(i)
if i < 4:
# It is part of the audit.general test data
requests_mock.get(current_endpoint, json=GET_BLOB_DATA_RESPONSE_FOR_AUDIT_GENERAL)
else:
requests_mock.get(current_endpoint, json=GET_BLOB_DATA_RESPONSE_FOR_AUDIT_ACTIVEDIRECTORY)
def set_requests_mock(client, requests_mock, access_token_resp=GET_ACCESS_TOKEN_RESPONSE,
start_subscription_resp=START_SUBSCRIPTION_RESPONSE,
list_subscriptions_resp=LIST_SUBSCRIPTIONS_RESPONSE_MULTIPLE_SUBSCRIPTIONS):
mock_get_access_token(requests_mock, access_token_resp)
mock_start_subscription(requests_mock, client, start_subscription_resp)
mock_stop_subscription(requests_mock, client)
mock_list_subscriptions(requests_mock, client, list_subscriptions_resp)
mock_list_content(requests_mock)
mock_get_blob_data(requests_mock)
| 35.097946 | 122 | 0.651483 |
acdea4cecd26638bf0ce55e6b50533d1c8e9a8b5 | 2,873 | py | Python | workable:api/workable.py | yassinehamdouni/xml-feeds | e30fbef48b11c9e8eadbdefa191630c990e7ac30 | [
"MIT"
] | null | null | null | workable:api/workable.py | yassinehamdouni/xml-feeds | e30fbef48b11c9e8eadbdefa191630c990e7ac30 | [
"MIT"
] | null | null | null | workable:api/workable.py | yassinehamdouni/xml-feeds | e30fbef48b11c9e8eadbdefa191630c990e7ac30 | [
"MIT"
] | null | null | null | import requests
import xml.etree.ElementTree as ET
from http.server import BaseHTTPRequestHandler
from html_sanitizer import Sanitizer
from urllib.parse import urlparse
class handler(BaseHTTPRequestHandler):
    """Serve an employer's Workable job list as a sanitized XML feed.

    The employer account name is expected in the request's query string
    (e.g. ``GET /?acme``); it is forwarded to the Workable widget API and
    the JSON response is converted into a ``<jobs>`` XML document.
    """

    def do_GET(self):
        try:
            request_url = urlparse(self.requestline)
            # The query string carries the employer account name.
            r = requests.get(
                f"https://apply.workable.com/api/v1/widget/accounts/{request_url.query.split()[0]}?details=true")
            data = r.json()
            allJobsXml = ET.Element('jobs')
            for job in data["jobs"]:
                jobXml = ET.SubElement(allJobsXml, 'job')
                titleXml = ET.SubElement(jobXml, 'title')
                titleXml.text = job["title"]
                urlXml = ET.SubElement(jobXml, 'url')
                urlXml.text = job["url"]
                locationXml = ET.SubElement(jobXml, 'location')
                locationXml.text = job["city"] + ", " + job["state"] + ", " + job["country"]
                # Strip the job description down to a small HTML whitelist.
                sanitizer = Sanitizer({
                    'tags': ('em', 'strong', 'a', 'p', 'br', 'span', 'ul', 'ol', 'li', 'h1', 'h2', 'h3', 'hr'),
                    'attributes': {"a": "href"},
                })
                descriptionXml = ET.SubElement(jobXml, 'description')
                descriptionXml.text = sanitizer.sanitize(job["description"])
                idXml = ET.SubElement(jobXml, 'id')
                idXml.text = job["shortcode"]
            self.send_response(200)
            self.send_header('Content-type', 'text/xml')
            self.send_header(
                'Cache-Control',
                'public, immutable, no-transform, s-maxage=3600, max-age=3600'
            )
            self.end_headers()
            message = ET.tostring(allJobsXml)
            self.wfile.write(message)
            return
        # Fixed: was a bare `except:`, which also swallows SystemExit and
        # KeyboardInterrupt. Any request/parsing failure yields a 500 page.
        except Exception:
            self.send_response(500)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            message = "<h1>Internal Error</h1><p>Sorry, there was a problem. Make sure the employer's name is " \
                      "included in the request path and / or the name is correct.</p> "
            self.wfile.write(message.encode())
            return

    def do_HEAD(self):
        """HEAD variant: relay the Workable API status code without a body."""
        try:
            request_url = urlparse(self.requestline)
            r = requests.get(
                f"https://apply.workable.com/api/v1/widget/accounts/{request_url.query.split()[0]}?details=true")
            self.send_response(r.status_code)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            return
        except Exception:  # was a bare `except:` — same fix as do_GET
            self.send_response(500)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            return
| 38.824324 | 113 | 0.537417 |
acdea4d61d3ada7e9f4f0aa7bc58c5643db2802b | 7,991 | py | Python | tensorflow/contrib/distributions/python/ops/gumbel.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/contrib/distributions/python/ops/gumbel.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/contrib/distributions/python/ops/gumbel.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 26 | 2017-04-12T16:25:44.000Z | 2018-10-30T10:10:15.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Gumbel distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
class _Gumbel(distribution.Distribution):
  """The scalar Gumbel distribution with location `loc` and `scale` parameters.

  #### Mathematical details

  The probability density function (pdf) of this distribution is,

  ```none
  pdf(x; mu, sigma) = exp(-(x - mu) / sigma - exp(-(x - mu) / sigma))
  ```

  where `loc = mu` and `scale = sigma`.

  The cumulative density function of this distribution is,

  ```cdf(x; mu, sigma) = exp(-exp(-(x - mu) / sigma))```

  The Gumbel distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,

  ```none
  X ~ Gumbel(loc=0, scale=1)
  Y = loc + scale * X
  ```

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  tfd = tf.contrib.distributions

  # Define a single scalar Gumbel distribution.
  dist = tfd.Gumbel(loc=0., scale=3.)

  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1.)

  # Define a batch of two scalar valued Gumbels.
  # The first has mean 1 and scale 11, the second 2 and 22.
  dist = tfd.Gumbel(loc=[1, 2.], scale=[11, 22.])

  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.prob([0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample([3])
  ```

  Arguments are broadcast when possible.

  ```python
  # Define a batch of two scalar valued Logistics.
  # Both have mean 1, but different scales.
  dist = tfd.Gumbel(loc=1., scale=[11, 22.])

  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.prob(3.0)
  ```
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Gumbel"):
    """Construct Gumbel distributions with location and scale `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor, the means of the distribution(s).
      scale: Floating point tensor, the scales of the distribution(s).
        scale must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if loc and scale are different dtypes.
    """
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
      # Positivity of `scale` is only asserted when validate_args is set.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        check_ops.assert_same_float_dtype([self._loc, self._scale])
    super(_Gumbel, self).__init__(
        dtype=self._scale.dtype,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._loc, self._scale],
        name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both `loc` and `scale` take the shape of a single sample.
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def loc(self):
    """Distribution parameter for the location."""
    return self._loc

  @property
  def scale(self):
    """Distribution parameter for scale."""
    return self._scale

  def _batch_shape_tensor(self):
    # Dynamic batch shape: broadcast of loc and scale shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.loc), array_ops.shape(self.scale))

  def _batch_shape(self):
    # Static batch shape: broadcast of loc and scale static shapes.
    return array_ops.broadcast_static_shape(
        self.loc.get_shape(), self.scale.get_shape())

  def _event_shape_tensor(self):
    # Scalar distribution: empty event shape.
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # Uniform variates must be sampled from the open-interval `(0, 1)` rather
    # than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
    # because it is the smallest, positive, "normal" number. A "normal" number
    # is such that the mantissa has an implicit leading 1. Normal, positive
    # numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
    # this case, a subnormal number (i.e., np.nextafter) can cause us to sample
    # 0.
    uniform = random_ops.random_uniform(
        shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
        minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
        maxval=1.,
        dtype=self.dtype,
        seed=seed)
    # Inverse-CDF (reparameterized) sampling: -log(-log(U)) is standard Gumbel.
    sampled = -math_ops.log(-math_ops.log(uniform))
    return sampled * self.scale + self.loc

  def _log_prob(self, x):
    # log pdf = -z - exp(-z) - log(scale), split into two terms below.
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _log_cdf(self, x):
    # log cdf(x) = -exp(-z)
    return -math_ops.exp(-self._z(x))

  def _cdf(self, x):
    # cdf(x) = exp(-exp(-z))
    return math_ops.exp(-math_ops.exp(-self._z(x)))

  def _log_unnormalized_prob(self, x):
    z = self._z(x)
    return - z - math_ops.exp(-z)

  def _log_normalization(self):
    return math_ops.log(self.scale)

  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast sigma.
    scale = self.scale * array_ops.ones_like(self.loc)
    # Entropy of Gumbel: 1 + log(scale) + Euler-Mascheroni constant.
    return 1 + math_ops.log(scale) + np.euler_gamma

  def _mean(self):
    # mean = loc + scale * Euler-Mascheroni constant
    return self.loc + self.scale * np.euler_gamma

  def _stddev(self):
    # stddev = scale * pi / sqrt(6)
    return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(6)

  def _mode(self):
    # The mode of a Gumbel distribution is its location parameter.
    return self.loc * array_ops.ones_like(self.scale)

  def _z(self, x):
    """Standardize input `x`, i.e. compute (x - loc) / scale."""
    with ops.name_scope("standardize", values=[x]):
      return (x - self.loc) / self.scale
| 34.296137 | 80 | 0.680265 |
acdea54f16e11133777f2e81468061f5f3492b3e | 1,016 | py | Python | merge_sort.py | kyamada101/sort | 33fc9547bc84506d5544849656bc0ec314041b80 | [
"MIT"
] | null | null | null | merge_sort.py | kyamada101/sort | 33fc9547bc84506d5544849656bc0ec314041b80 | [
"MIT"
] | null | null | null | merge_sort.py | kyamada101/sort | 33fc9547bc84506d5544849656bc0ec314041b80 | [
"MIT"
] | null | null | null | import time
import math
# Load comma-separated integers from disk; the file ends with a trailing
# comma, so the last (empty) token is dropped before conversion.
with open("number2.txt", "r") as f:
    nums = f.read()
num_list = nums.split(",")
del num_list[-1]
num_list = list(map(int, num_list))
def merge_sort(num_list):
    """Return a new list containing the elements of `num_list` in ascending order.

    Classic top-down merge sort: split at ceil(len/2), sort both halves
    recursively, then merge. The merge uses index cursors instead of
    `list.pop(0)` (which is O(n) per call and made the original merge
    accidentally O(n^2)); output ordering is unchanged, including tie
    handling (the left half wins ties, keeping the sort stable).
    """
    length = len(num_list)
    if length <= 1:
        # Base case: empty or single-element lists are already sorted.
        return num_list
    mid = math.ceil(length / 2)
    left = merge_sort(num_list[:mid])
    right = merge_sort(num_list[mid:])
    # Merge the two sorted halves in a single O(n) pass.
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if right[j] < left[i]:
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    # At most one of the two halves still has elements; append the rest.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
# Time the sort and show the first 100 elements of the result.
start = time.time()
ans = merge_sort(num_list)
end = time.time()
print("time is {:.3f}secs".format(end - start))
print(ans[0:100])
acdea5ab174667ad77cfefb64df3a23dbb0cd3c6 | 268 | py | Python | helpers/utils.py | chadbartel/Example-API-Service | 1d2601160ad522bfc45909f9a2f487d07bd5a14f | [
"CC0-1.0"
] | null | null | null | helpers/utils.py | chadbartel/Example-API-Service | 1d2601160ad522bfc45909f9a2f487d07bd5a14f | [
"CC0-1.0"
] | null | null | null | helpers/utils.py | chadbartel/Example-API-Service | 1d2601160ad522bfc45909f9a2f487d07bd5a14f | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
"""Utility helper file."""
def merge_dicts(*dict_args):
    """Merge any number of dictionaries into a new one.

    Later dictionaries take precedence on duplicate keys; the inputs are
    not modified. Calling with no arguments returns an empty dict.

    Note: the original guarded against ``dict_args is None``, which is dead
    code — a ``*args`` tuple is never None — so the check was removed.
    """
    result = {}
    for d in dict_args:
        result.update(d)
    return result
acdea65dad4243ca9d9a834d0c8431d0c0343e0c | 813 | py | Python | databuilder/databuilder/transformer/remove_field_transformer.py | defendercrypt/amundsen | 83c728b646020f60cf2270c12e766fe4af8c9948 | [
"Apache-2.0"
] | 2,072 | 2020-08-11T20:16:48.000Z | 2022-03-31T07:04:05.000Z | databuilder/databuilder/transformer/remove_field_transformer.py | defendercrypt/amundsen | 83c728b646020f60cf2270c12e766fe4af8c9948 | [
"Apache-2.0"
] | 795 | 2020-08-11T15:24:39.000Z | 2022-03-31T18:56:13.000Z | databuilder/databuilder/transformer/remove_field_transformer.py | defendercrypt/amundsen | 83c728b646020f60cf2270c12e766fe4af8c9948 | [
"Apache-2.0"
] | 671 | 2020-08-11T20:39:56.000Z | 2022-03-31T08:39:07.000Z | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
from typing import Any, Dict
from pyhocon import ConfigTree
from databuilder.transformer.base_transformer import Transformer
FIELD_NAMES = 'field_names'  # config key listing the dict keys to remove
LOGGER = logging.getLogger(__name__)
class RemoveFieldTransformer(Transformer):
    """
    Drops a configured list of keys from each record dict, in place.
    """

    def init(self, conf: ConfigTree) -> None:
        # Keys to strip are read from the transformer's configuration.
        self._field_names = conf.get_list(FIELD_NAMES)

    def transform(self, record: Dict[str, Any]) -> Dict[str, Any]:
        # pop() with a default removes the key when present and is a
        # no-op otherwise, exactly like the original membership check.
        for field_name in self._field_names:
            record.pop(field_name, None)
        return record

    def get_scope(self) -> str:
        return 'transformer.remove_field'
acdea7d00228ba1f031df5ca64d001e151d4efdd | 544 | py | Python | api/serializers/LongestRouteWithRouteContextSerializer.py | M4hakala/drf_route_api_example | 894989bf71a6c781c54093a7ac6a8d7a1d951146 | [
"MIT"
] | null | null | null | api/serializers/LongestRouteWithRouteContextSerializer.py | M4hakala/drf_route_api_example | 894989bf71a6c781c54093a7ac6a8d7a1d951146 | [
"MIT"
] | null | null | null | api/serializers/LongestRouteWithRouteContextSerializer.py | M4hakala/drf_route_api_example | 894989bf71a6c781c54093a7ac6a8d7a1d951146 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from api.models import LongestRouteModel, RouteModel
class LongestRouteWithRouteContextSerializer(serializers.ModelSerializer):
    """Serializer for LongestRouteModel records bound to a given RouteModel."""

    def __init__(self, route: RouteModel, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Route the created longest-route record will be attached to.
        self.route = route

    class Meta:
        model = LongestRouteModel
        fields = ('route_date',)

    def create(self, validated_data):
        # Inject the bound route when creating the model instance.
        longest_route = LongestRouteModel.objects.create(route=self.route, **validated_data)
        return longest_route
| 30.222222 | 92 | 0.71875 |
acdea8584ed334b80df6250500b2aa71f275d6ef | 11,954 | py | Python | app/models.py | sbybfai/flask_blog | f08bcb6a73d85a926992d1d451f1a2075331a34f | [
"Apache-2.0"
] | null | null | null | app/models.py | sbybfai/flask_blog | f08bcb6a73d85a926992d1d451f1a2075331a34f | [
"Apache-2.0"
] | 3 | 2020-03-24T16:41:57.000Z | 2021-06-01T23:14:22.000Z | app/models.py | sbybfai/flask_blog | f08bcb6a73d85a926992d1d451f1a2075331a34f | [
"Apache-2.0"
] | null | null | null | from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin, AnonymousUserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app, request, url_for
from markdown import markdown
from . import login_manager
from app.exceptions import ValidationError
from datetime import datetime
import hashlib
import bleach
class Role(db.Model):
    """User role carrying a bitmask of Permission flags."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # The default role is the one assigned to newly registered users.
    default = db.Column(db.Boolean, default=False, index=True)
    permissions = db.Column(db.Integer)
    users = db.relationship('User', backref='role', lazy='dynamic')

    def __init__(self, **kwargs):
        super(Role, self).__init__(**kwargs)
        # Start with an empty permission bitmask unless one was supplied.
        if self.permissions is None:
            self.permissions = 0

    def add_permission(self, perm):
        # Set the permission bit(s) if not already present.
        if not self.has_permission(perm):
            self.permissions += perm

    def has_permission(self, perm):
        # True when every bit of `perm` is present in the bitmask.
        return self.permissions & perm == perm

    def remove_permission(self, perm):
        # Clear the permission bit(s) if present.
        if self.has_permission(perm):
            self.permissions -= perm

    def reset_permissions(self):
        # Drop all permissions.
        self.permissions = 0

    @staticmethod
    def init_roles():
        """Create or refresh the built-in roles and their permission sets."""
        roles = {
            'User': [Permission.COMMENT],
            'Moderator': [Permission.COMMENT, Permission.WRITE, Permission.MODERATE],
            'Administrator': [Permission.COMMENT, Permission.WRITE, Permission.MODERATE,
                              Permission.ADMIN],
        }
        default_role = 'User'
        for r in roles:
            role = Role.query.filter_by(name=r).first()
            if role is None:
                role = Role(name=r)
            # Rebuild the bitmask from scratch so stale bits are removed.
            role.reset_permissions()
            for perm in roles[r]:
                role.add_permission(perm)
            role.default = (role.name == default_role)
            db.session.add(role)
        db.session.commit()

    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """Registered account: credentials, profile, role and token helpers.

    Fix over the original: the four bare ``except:`` clauses around
    itsdangerous token deserialization were narrowed to
    ``except Exception:`` so SystemExit/KeyboardInterrupt are no longer
    swallowed. Token-validation behavior is otherwise unchanged.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    email = db.Column(db.String(64), unique=True, index=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    about_me = db.Column(db.Text)
    join_time = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
    avatar_hash = db.Column(db.String(32))
    posts = db.relationship('Post', backref='author', lazy='dynamic')

    def __init__(self, **kwargs):
        """Assign the Administrator role to the configured admin email, else the default role."""
        super(User, self).__init__(**kwargs)
        if self.role is None:
            if self.email == current_app.config['FLASKY_ADMIN']:
                self.role = Role.query.filter_by(name='Administrator').first()
            if self.role is None:
                self.role = Role.query.filter_by(default=True).first()
        # Cache the Gravatar hash so it is not recomputed on every request.
        if self.email is not None and self.avatar_hash is None:
            self.avatar_hash = self.gravatar_hash()

    @property
    def password(self):
        """Plaintext password is write-only; only the hash is stored."""
        raise AttributeError("password is not a readable attribute")

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Check a plaintext password against the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Return a signed, expiring account-confirmation token."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id}).decode('utf-8')

    def confirm(self, token):
        """Validate a confirmation token and mark the account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # invalid/expired token (BadSignature, SignatureExpired, ...)
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        """Return a signed, expiring password-reset token."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id}).decode('utf-8')

    @staticmethod
    def reset_password(token, new_password):
        """Validate a reset token and set the new password; return success."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # invalid/expired token
            return False
        user = User.query.get(data.get('reset'))
        if user is None:
            return False
        user.password = new_password
        db.session.add(user)
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        """Return a signed token binding this user to the requested new email."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps(
            {'change_email': self.id, 'new_email': new_email}).decode('utf-8')

    def change_email(self, token):
        """Validate an email-change token and apply the new address; return success."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:  # invalid/expired token
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if new_email is None:
            return False
        # Refuse addresses already taken by another account.
        if self.query.filter_by(email=new_email).first() is not None:
            return False
        self.update_email(new_email)
        db.session.add(self)
        return True

    def update_email(self, new_email):
        """Set a new email and refresh the cached Gravatar hash."""
        if self.email != new_email:
            self.email = new_email
            self.avatar_hash = self.gravatar_hash()

    def can(self, perm):
        """True when the user's role includes the given permission bits."""
        return self.role is not None and self.role.has_permission(perm)

    def is_administrator(self):
        return self.can(Permission.ADMIN)

    def ping(self):
        """Refresh the last-seen timestamp (called on every request)."""
        self.last_seen = datetime.utcnow()
        db.session.add(self)
        db.session.commit()

    def gravatar(self, size=100, default='identicon', rating='g'):
        """Build a Gravatar URL matching the request's http/https scheme."""
        if request.is_secure:
            url = 'https://s.gravatar.com/avatar'
        else:
            url = 'http://s.gravatar.com/avatar'
        hash = self.avatar_hash or self.gravatar_hash()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(url=url, hash=hash, size=size, default=default,
                                                                     rating=rating)

    def gravatar_hash(self):
        """MD5 of the lowercased email, per the Gravatar protocol."""
        return hashlib.md5(self.email.lower().encode('utf-8')).hexdigest()

    def to_json(self):
        """Serialize the user for the REST API."""
        json_user = {
            'url': url_for('api.get_user', id=self.id),
            'username': self.username,
            'join_time': self.join_time,
            'last_seen': self.last_seen,
            'posts_url': url_for('api.get_user_posts', id=self.id),
            'post_count': self.posts.count()
        }
        return json_user

    def generate_auth_token(self, expiration):
        """Return a signed, expiring API authentication token."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id}).decode('utf-8')

    @staticmethod
    def verify_auth_token(token):
        """Return the user for a valid API token, or None."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # invalid/expired token
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
    """Flask-Login anonymous user honoring the site's guest settings."""

    def can(self, permissions):
        # Guests may comment / register only when the matching feature
        # flag is enabled in the app configuration.
        if permissions == Permission.COMMENT:
            return bool(current_app.config["ENABLE_COMMENT"])
        if permissions == Permission.LOGIN:
            return bool(current_app.config["ENABLE_REGISTER"])
        return False

    def is_administrator(self):
        # Anonymous visitors are never administrators.
        return False

    def get_id(self):
        # No persistent identity for anonymous visitors.
        return None
class Post(db.Model):
    """Blog post; body_html is re-rendered from markdown whenever body changes."""
    __tablename__ = "posts"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(64))
    category_id = db.Column(db.Integer, db.ForeignKey('categories.id'))
    summary = db.Column(db.Text)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    update_time = db.Column(db.DateTime, default=datetime.utcnow)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    comments = db.relationship('Comment', backref='post', lazy='dynamic')

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        # SQLAlchemy 'set' listener: render markdown (extra extensions) and
        # turn plain URLs into links.
        target.body_html = bleach.linkify(markdown(value, extensions=["extra"]))

    def to_json(self):
        """Serialize the post for the REST API."""
        json_post = {
            'url': url_for('api.get_post', id=self.id),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'author_url': url_for('api.get_user', id=self.author_id),
            'comments_url': url_for('api.get_post_comments', id=self.id),
            'comment_count': self.comments.count()
        }
        return json_post

    @staticmethod
    def from_json(json_post):
        """Build a Post from an API payload; a non-empty body is required."""
        body = json_post.get('body')
        if body is None or body == '':
            raise ValidationError('post does not have a body')
        return Post(body=body)
# Re-render Post.body_html whenever Post.body is set.
db.event.listen(Post.body, 'set', Post.on_changed_body)
class Comment(db.Model):
    """Guest comment on a post; supports one level of reply via replay_id."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    body = db.Column(db.Text)
    body_html = db.Column(db.Text)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
    disabled = db.Column(db.Boolean)
    user_name = db.Column(db.String(64))
    email = db.Column(db.String(64))
    url = db.Column(db.String(64))
    post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
    # Self-referential FK: the comment this one replies to (if any).
    replay_id = db.Column(db.Integer, db.ForeignKey('comments.id'))
    replay = db.relationship("Comment", remote_side=[id])

    @staticmethod
    def on_changed_body(target, value, oldvalue, initiator):
        # Render markdown, then whitelist a small set of inline tags and
        # linkify URLs — comments are untrusted input.
        allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
                        'strong']
        target.body_html = bleach.linkify(bleach.clean(
            markdown(value, output_format='html'),
            tags=allowed_tags, strip=True))

    def to_json(self):
        """Serialize the comment for the REST API."""
        json_comment = {
            'url': url_for('api.get_comment', id=self.id),
            'post_url': url_for('api.get_post', id=self.post_id),
            'body': self.body,
            'body_html': self.body_html,
            'timestamp': self.timestamp,
            'user_name': self.user_name,
            'author_url': self.url,
            'name': self.user_name,
            'replay_id': self.replay_id,
        }
        return json_comment

    @staticmethod
    def from_json(json_comment):
        """Build a Comment from an API payload; a non-empty body is required."""
        body = json_comment.get('body')
        if body is None or body == '':
            raise ValidationError('comment does not have a body')
        return Comment(body=body)
class Category(db.Model):
    """Post category; insert_categories seeds the built-in set."""
    __tablename__ = 'categories'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    posts = db.relationship('Post', backref='category', lazy='dynamic')

    @staticmethod
    def insert_categories():
        """Create the built-in categories if they do not exist yet (idempotent)."""
        categories = ['技术', '笔记', '随笔']
        for category in categories:
            post_category = Category.query.filter_by(name=category).first()
            if post_category is None:
                post_category = Category(name=category)
                db.session.add(post_category)
        db.session.commit()

    def __repr__(self):
        return '<Category %r>' % self.name
# Re-render Comment.body_html on body changes; serve our AnonymousUser to guests.
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
login_manager.anonymous_user = AnonymousUser
class Permission:
    """Bit-flag permission constants combined into Role.permissions."""
    LOGIN = 1  # guest login
    COMMENT = 2  # post comments
    WRITE = 4  # write articles
    MODERATE = 8  # moderate comments
    ADMIN = 16  # administrator
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: reload the User object from the session's id.
    return User.query.get(int(user_id))
| 33.391061 | 116 | 0.616865 |
acdea8df95b220baa20118e242604ed3e32d80a4 | 592 | py | Python | log.py | MarcoFavorito/supervised-morphological-segmentation-using-CRFs | 99934e4a40baf9886cefce8050e3d8329125eed0 | [
"MIT"
] | null | null | null | log.py | MarcoFavorito/supervised-morphological-segmentation-using-CRFs | 99934e4a40baf9886cefce8050e3d8329125eed0 | [
"MIT"
] | null | null | null | log.py | MarcoFavorito/supervised-morphological-segmentation-using-CRFs | 99934e4a40baf9886cefce8050e3d8329125eed0 | [
"MIT"
] | null | null | null | import logging
import sys
import settings
# Optional shared FileHandler; the application may set this before
# calling setup_logger to also log to a file.
log_fileHandler = None


def setup_logger(name):
    """Create and return a DEBUG-level logger writing to stdout.

    If the module-level ``log_fileHandler`` is set, it is attached as well.
    Note: handlers are added on every call, so calling this twice with the
    same name duplicates output lines.
    """
    logger = logging.getLogger(name)
    formatter = logging.Formatter(fmt=settings.log_fmt)
    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setFormatter(formatter)
    handler.setLevel(logging.DEBUG)
    file_handler = log_fileHandler
    # Fixed idiom: was `if not fileHandler == None:`; identity comparison
    # with None is the correct Python form (behavior unchanged).
    if file_handler is not None:
        file_handler.setFormatter(formatter)
        file_handler.setLevel(logging.DEBUG)
        logger.addHandler(file_handler)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    return logger
| 23.68 | 55 | 0.738176 |
acdea93980a486140c9f16074fa4c81cce561abc | 4,296 | py | Python | src/salt_finder_charts/position_angle.py | saltastroops/salt_finder_charts | f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de | [
"MIT"
] | null | null | null | src/salt_finder_charts/position_angle.py | saltastroops/salt_finder_charts | f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de | [
"MIT"
] | null | null | null | src/salt_finder_charts/position_angle.py | saltastroops/salt_finder_charts | f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de | [
"MIT"
] | null | null | null | from typing import Optional, Tuple
from gilmenel import gilmenel
from gilmenel.instruments import BaseInstrument, Star
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.units import Quantity
# Default search annulus (angular radii) around the target.
MIN_RADIUS = 1 * u.arcmin
MAX_RADIUS = 3 * u.arcmin
# Default magnitude window for candidate stars (brightest, faintest).
MIN_MAG = 15
MAX_MAG = 18
# Default minimum angular separation from neighbouring stars.
MIN_STAR_SEPARATION = 10 * u.arcsec
def estimated_position_angle(
    ra: Quantity,
    dec: Quantity,
    radius_range: Tuple[Quantity, Quantity] = (MIN_RADIUS, MAX_RADIUS),
    mag_range: Tuple[float, float] = (MIN_MAG, MAX_MAG),
    min_star_separation: Quantity = MIN_STAR_SEPARATION,
) -> Optional[Quantity]:
    """
    Find a suitable position angle.

    The GAIA star catalog is used to find a suitable star with which a slit can be
    properly positioned, and the position angle of that star relative to the target is
    returned.

    Parameters
    ----------
    ra : Quantity
        Right ascension of the target, as an angle.
    dec : Quantity
        Declination of the target, as an angle.
    radius_range : pair of Quantity
        The inner and outer radius (as an angle) of the annulus in which a suitable may
        be located.
    mag_range : pair of float
        The minimum (brightest) and maximum (faintest) magnitude a suitable star may
        have.
    min_star_separation : Quantity
        The minimum angular distance a suitable star must have from neighbouring stars.

    Returns
    -------
    Quantity or None
        The position angle from the target to the best-matching star, or
        ``None`` if no suitable star is found.
    """
    # set up the conditions for the Gaia star catalog search
    instr = _build_position_angle_instrument(
        radius_range=radius_range,
        mag_range=mag_range,
        min_star_separation=min_star_separation,
    )
    center = SkyCoord(ra, dec)
    instr.point_to(center)

    # search for stars matching the conditions
    gilmenel.init()
    stars = gilmenel.view_sky(instr)
    matching_stars = gilmenel.find_best_stars(instr, stars)

    # don't calculate a position angle if there is no suitable star
    if len(matching_stars) == 0:
        return None

    # Find the best of the matching stars.
    best_star = sorted(matching_stars, key=_sorting_key(radius_range, mag_range))[0]
    best_star_coord = SkyCoord(best_star.ra, best_star.dec)
    return center.position_angle(best_star_coord)
def _build_position_angle_instrument(
    radius_range: Tuple[Quantity, Quantity],
    mag_range: Tuple[float, float],
    min_star_separation: Quantity,
) -> BaseInstrument:
    """
    Create an "instrument" for the conditions stars must match for use in positioning a
    slit.

    Parameters
    ----------
    radius_range : pair of Quantity
        The inner and outer radius (as an angle) of the annulus in which a suitable may
        be located.
    mag_range : pair of float
        The minimum (brightest) and maximum (faintest) magnitude a suitable star may
        have.
    min_star_separation : Quantity
        The minimum angular distance a suitable star must have from neighbouring stars.

    Returns
    -------
    BaseInstrument
        The "instrument" with the search conditions.
    """

    class _PositionAngleInstrument(BaseInstrument):
        def best_stars(self, stars):
            # NOTE(review): merit >= 4 appears to select only fully-qualified
            # candidates on gilmenel's merit scale — confirm against gilmenel docs.
            return [s for s in stars if s.merit >= 4]

    instr = _PositionAngleInstrument(
        name="PositionAngle",
        instr_fov=radius_range[1],
        inner_excl_distance=radius_range[0],
        nearby_limit=min_star_separation,
        bright_limit=mag_range[0],
        faint_limit=mag_range[1],
    )
    return instr
def _sorting_key(
    radius_range: Tuple[Quantity, Quantity], mag_range: Tuple[float, float]
):
    """
    Create a key function for sorting stars by suitability.

    The most suitable star comes first in a sorted list: suitability is the
    (weighted) sum of the distance from the annulus' inner radius and the
    deviation from the mid-point of the magnitude range — lower is better.

    Parameters
    ----------
    radius_range : pair of Quantity
        The inner and outer radius (as an angle) of the annulus in which a
        suitable star may be located.
    mag_range : pair of float
        The minimum (brightest) and maximum (faintest) magnitude a suitable
        star may have.

    Returns
    -------
    Callable
        A key function (star -> float) which can be passed as the ``key``
        argument of ``sorted``.
    """
    # Prefer stars close to the inner radius of the annulus ...
    target_radius = radius_range[0]
    # ... and close to the middle of the allowed magnitude range.
    target_magnitude = 0.5 * sum(mag_range)
    # Magnitude deviation is down-weighted (factor 0.2) relative to the
    # radial deviation in arcminutes.
    return lambda star: abs(
        (star.radius - target_radius).to_value(u.arcmin)
    ) + 0.2 * abs(star.g_mag - target_magnitude)
| 29.22449 | 87 | 0.684125 |
acdea99c95418c1600aba614aaa47a1a1ccf9c3f | 3,133 | py | Python | app/grandchallenge/groups/views.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 7 | 2016-11-05T07:16:30.000Z | 2017-11-23T03:38:03.000Z | app/grandchallenge/groups/views.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 113 | 2015-05-26T09:27:59.000Z | 2018-03-21T10:45:56.000Z | app/grandchallenge/groups/views.py | kaczmarj/grand-challenge.org | 8dc8a2170e51072354f7e94f2a22578805a67b94 | [
"Apache-2.0"
] | 7 | 2015-07-16T20:11:22.000Z | 2017-06-06T02:41:24.000Z | from dal import autocomplete
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import CharField, Q, Value
from django.db.models.functions import Concat
from django.utils.html import format_html
from django.views.generic import FormView
from guardian.mixins import LoginRequiredMixin
from guardian.mixins import (
PermissionRequiredMixin as ObjectPermissionRequiredMixin,
)
from grandchallenge.verifications.models import Verification
class UserGroupUpdateMixin(
    LoginRequiredMixin,
    ObjectPermissionRequiredMixin,
    SuccessMessageMixin,
    FormView,
):
    """Mixin for form views that add or remove a user from a group belonging
    to some object.

    Subclasses must provide the ``obj`` property; permissions are checked
    against that object.
    """

    # Raise PermissionDenied instead of redirecting to the login page.
    raise_exception = True

    def get_permission_object(self):
        """Check object-level permissions against ``self.obj``."""
        return self.obj

    @property
    def obj(self):
        """The object whose user group is being updated; must be supplied by
        the subclass."""
        raise NotImplementedError

    def get_context_data(self, **kwargs):
        """Expose the target object and the form's role to the template."""
        context = super().get_context_data(**kwargs)
        context.update({"object": self.obj, "role": self.get_form().role})
        return context

    def get_success_url(self):
        """Redirect back to the object's detail page after a successful
        update."""
        return self.obj.get_absolute_url()

    def form_valid(self, form):
        """Apply the membership change, then proceed with the usual success
        handling (message + redirect)."""
        form.add_or_remove_user(obj=self.obj)
        return super().form_valid(form)
class UserAutocomplete(LoginRequiredMixin, autocomplete.Select2QuerySetView):
    """Select2 autocomplete view for looking up users by name or email."""

    def get_queryset(self):
        """Return all non-anonymous users, annotated with a ``full_name``
        ("first last") used for filtering, optionally narrowed by the
        search term ``self.q`` against username, email, full name or the
        verified email address."""
        qs = (
            get_user_model()
            .objects.order_by("username")
            .exclude(username=settings.ANONYMOUS_USER_NAME)
            .annotate(
                full_name=Concat(
                    "first_name",
                    Value(" "),
                    "last_name",
                    output_field=CharField(),
                )
            )
            # Pre-fetch relations used when rendering result labels.
            .select_related("verification", "user_profile")
        )
        if self.q:
            qs = qs.filter(
                Q(username__icontains=self.q)
                | Q(email__icontains=self.q)
                | Q(full_name__icontains=self.q)
                | Q(verification__email__icontains=self.q)
            )
        return qs

    def get_result_label(self, result):
        """Render the dropdown entry for one user: mugshot, username and full
        name, plus a verification badge and email domain when the user's
        email address is verified."""
        try:
            is_verified = result.verification.is_verified
        except Verification.DoesNotExist:
            # No verification record at all counts as unverified.
            is_verified = False
        if is_verified:
            # NOTE(review): the stray space in `width ="20"` looks
            # unintentional but is kept verbatim — confirm before changing.
            return format_html(
                '<img class="rounded-circle align-middle" src="{}" width ="20" height ="20"> '
                " <b>{}</b> {} "
                '<i class="fas fa-user-check text-success"></i>'
                " Verified email address at {}",
                result.user_profile.get_mugshot_url(),
                result.get_username(),
                result.get_full_name().title(),
                result.verification.email.split("@")[1],
            )
        else:
            return format_html(
                '<img class="rounded-circle align-middle" src="{}" width ="20" height ="20"> '
                " <b>{}</b> {}",
                result.user_profile.get_mugshot_url(),
                result.get_username(),
                result.get_full_name().title(),
            )
| 31.969388 | 94 | 0.595595 |
acdea9b69f3e24202e9f85f392b20aa7494e29d2 | 2,125 | py | Python | config.py | X-Red/mailbox-size | aded6863af5c237f0c43ee7f23592cdd54820f82 | [
"MIT"
] | null | null | null | config.py | X-Red/mailbox-size | aded6863af5c237f0c43ee7f23592cdd54820f82 | [
"MIT"
] | null | null | null | config.py | X-Red/mailbox-size | aded6863af5c237f0c43ee7f23592cdd54820f82 | [
"MIT"
# X-Red.Com - Configuration file

# Host and filesystem layout.
SERVER_NAME = "servidor.x-red.com"
VIRTUAL_ROOT = "/virtual/vweb/"  # Must include the / at the end
XRED_LOG = "/var/log/x-red.com/"

# Database connection settings.
MYSQL_HOST = "127.0.0.1"
MYSQL_USER = "root"
# MYSQL_PWD = "Create this variable in config_local.py file and fill this part with your password. If config_local.py does not exist, create one."
MYSQL_DB = "vmail"

# This query is the default query to be used in select_user_paths() function
SELECT_QUERY = "SELECT username, maildir FROM mailbox WHERE active ='1'"

# Table details used when creating a new table and/or inserting obtained
# sizes. If edited, change the values on the right hand side to the column
# names (table name for the first one) you desire.
USAGE_TABLE = {
    'name': 'usage_size',  # Name of the table.
    'pk_column': 'UsageId',  # Auto-incremented primary key; usually leave as-is.
    'relational_column': 'username',  # Unique per-user identifier (username, userid, email, ...).
    'size_column': 'UsageSize',  # Column recording the total usage size.
    'timestamp_column': 'TimeDetected',  # Column recording the time detected.
}

# Used by the unit tests. If a table named "test_table" already exists in the
# database, change this variable.
TEST_TABLE = "test_table"

# DDL statement used to create the usage table on first use; customize it via
# USAGE_TABLE above rather than editing the SQL directly.
TABLE_DESCRIPTION = (
    "CREATE TABLE " + USAGE_TABLE['name'] + "("
    " " + USAGE_TABLE['pk_column'] + " int NOT NULL AUTO_INCREMENT,"
    " " + USAGE_TABLE['relational_column'] + " varchar(255) NOT NULL,"
    " " + USAGE_TABLE['size_column'] + " double,"
    " " + USAGE_TABLE['timestamp_column'] + " timestamp,"
    " PRIMARY KEY (" + USAGE_TABLE['pk_column'] + ")"
    ")"
)

#ROOT_DIR = '/virtual/vmail/'

# Load the machine-local configuration, which may override any value above
# (notably MYSQL_PWD). Bug fix: the original bare `except:` also swallowed
# genuine errors *inside* config_local.py (and even KeyboardInterrupt);
# only a missing module should be tolerated.
try:
    from config_local import *
except ImportError:
    print("config_local.py is not imported. Please check if the file exists.")
| 47.222222 | 157 | 0.724706 |
acdeaa40af8eca5519037c8226462008db7edc1f | 4,331 | py | Python | sentry/templatetags/sentry_helpers.py | kmike/django-sentry | cdaebf505de2a70c7dc9277d3cbb38e910c5c671 | [
"BSD-3-Clause"
] | 1 | 2016-09-30T16:26:18.000Z | 2016-09-30T16:26:18.000Z | sentry/templatetags/sentry_helpers.py | kmike/django-sentry | cdaebf505de2a70c7dc9277d3cbb38e910c5c671 | [
"BSD-3-Clause"
] | null | null | null | sentry/templatetags/sentry_helpers.py | kmike/django-sentry | cdaebf505de2a70c7dc9277d3cbb38e910c5c671 | [
"BSD-3-Clause"
] | null | null | null | from django import template
from django.db.models import Count
from django.utils import simplejson
from sentry.helpers import get_db_engine
from sentry.plugins import GroupActionProvider
import datetime
# Registry for this template-tag library; Django picks it up by name.
register = template.Library()


@register.filter
def is_dict(value):
    """Template filter: True when ``value`` is a dict (or dict subclass)."""
    return isinstance(value, dict)
@register.filter
def with_priority(result_list, key='score'):
    """Template filter: yield ``(result, priority)`` pairs for each result.

    ``priority`` is one of 'veryhigh'/'high'/'medium'/'low'/'verylow',
    assigned by splitting the observed range of ``key`` values into four
    equal-width bands below the maximum. Results may be dicts or objects;
    ``key`` is looked up accordingly. Yields nothing for an empty list.
    """
    if result_list:
        # Pick an accessor matching the element type (mapping vs. attribute).
        if isinstance(result_list[0], dict):
            _get = lambda x, k: x[k]
        else:
            _get = lambda x, k: getattr(x, k)
        min_, max_ = min([_get(r, key) for r in result_list]), max([_get(r, key) for r in result_list])
        # Band width; note this is integer division under Python 2 when the
        # scores are ints, so bands are truncated accordingly.
        mid = (max_ - min_) / 4
        for result in result_list:
            val = _get(result, key)
            if val > max_ - mid:
                priority = 'veryhigh'
            elif val > max_ - mid * 2:
                priority = 'high'
            elif val > max_ - mid * 3:
                priority = 'medium'
            elif val > max_ - mid * 4:
                priority = 'low'
            else:
                priority = 'verylow'
            yield result, priority
@register.filter
def num_digits(value):
    """Template filter: number of characters in ``str(value)`` (for an int,
    its number of digits)."""
    return len(str(value))
@register.filter
def chart_data(group, max_days=90):
    """Template filter: hourly message counts for ``group`` over a recent
    window, shaped for charting.

    Returns a dict with parallel, newest-first lists: ``points`` (counts per
    hour, 0 where no messages) and ``categories`` (the hour timestamps as
    strings).
    """
    hours = max_days*24
    # Truncate "now" to the start of the current hour as the chart anchor.
    today = datetime.datetime.now().replace(microsecond=0, second=0, minute=0)
    # Pick the DB connection the group lives on (multi-db aware).
    if hasattr(group, '_state'):
        from django.db import connections
        conn = connections[group._state.db]
    else:
        from django.db import connection as conn
    # Backend-specific SQL that truncates a datetime to the hour.
    method = conn.ops.date_trunc_sql('hour', 'datetime')
    chart_qs = list(group.message_set.all()\
                    .filter(datetime__gte=datetime.datetime.now() - datetime.timedelta(hours=hours))\
                    .extra(select={'grouper': method}).values('grouper')\
                    .annotate(num=Count('id')).values_list('grouper', 'num')\
                    .order_by('grouper'))
    min_date = chart_qs[0][0]
    # If there is more than a day of history, size the window from the
    # oldest/newest buckets; otherwise chart just the last 24 hours.
    if min_date and min_date < datetime.datetime.now() - datetime.timedelta(days=1):
        stop_hours = (datetime.datetime.now() - min_date).days * 24
        start_hours = (datetime.datetime.now() - chart_qs[-1][0]).days * 24
    else:
        stop_hours = 24
        start_hours = 0
    rows = dict(chart_qs)
    # NOTE(review): max_y is computed but never used or returned.
    if rows:
        max_y = max(rows.values())
    else:
        max_y = 1
    return {
        'points': [rows.get(today-datetime.timedelta(hours=d), 0) for d in xrange(start_hours, stop_hours)][::-1],
        'categories': [str(today-datetime.timedelta(hours=d)) for d in xrange(start_hours, stop_hours)][::-1],
    }
@register.filter
def to_json(data):
    """Template filter: serialize ``data`` to a JSON string."""
    return simplejson.dumps(data)
def sentry_version():
    """Template tag: the running Sentry version string."""
    # Imported lazily to avoid a circular import at module load time.
    from sentry import get_version
    return get_version()
register.simple_tag(sentry_version)
@register.filter
def get_actions(group, request):
    """Yield ``(label, url, is_active)`` action triples contributed by every
    registered ``GroupActionProvider`` plugin for ``group``.

    ``is_active`` is True when the request is currently on the action's URL.
    """
    actions = []
    for provider_class in GroupActionProvider.plugins.itervalues():
        actions = provider_class(group.pk).actions(request, actions, group)
    for entry in actions:
        label, url = entry[0], entry[1]
        yield label, url, request.path == url
@register.filter
def get_panels(group, request):
    """Yield ``(label, url, is_active)`` panel triples collected from every
    registered ``GroupActionProvider`` plugin for ``group``."""
    panels = []
    for provider_class in GroupActionProvider.plugins.itervalues():
        panels = provider_class(group.pk).panels(request, panels, group)
    for entry in panels:
        label, url = entry[0], entry[1]
        yield label, url, request.path == url
@register.filter
def get_widgets(group, request):
    """Yield the widget produced by each plugin that returns a truthy one."""
    for provider_class in GroupActionProvider.plugins.itervalues():
        widget = provider_class(group.pk).widget(request, group)
        if widget:
            yield widget
@register.filter
def get_tags(group, request):
    """Yield every tag contributed by the registered plugins for ``group``."""
    collected = []
    for provider_class in GroupActionProvider.plugins.itervalues():
        collected = provider_class(group.pk).tags(request, collected, group)
    for tag in collected:
        yield tag
@register.filter
def timesince(value):
    """Template filter: human-friendly age of ``value``.

    Returns 'Never' for falsy values, the plain date for anything older than
    five days, and otherwise a shortened Django timesince string such as
    'Just now', 'Yesterday' or '3 hours ago'.
    """
    # Deliberately shadows this filter's own name with Django's helper.
    from django.template.defaultfilters import timesince
    if not value:
        return 'Never'
    if value < datetime.datetime.now() - datetime.timedelta(days=5):
        return value.date()
    # Keep only the first unit ("2 hours, 5 minutes" -> "2 hours").
    value = (' '.join(timesince(value).split(' ')[0:2])).strip(',')
    if value == '0 minutes':
        return 'Just now'
    if value == '1 day':
        return 'Yesterday'
    return value + ' ago'
| 30.935714 | 114 | 0.619026 |
acdeab7841cafdb03b5dfb2c1a8d60d391b1b077 | 6,004 | py | Python | 8.x/mk/python/test/mktestsupport.py | apnadkarni/kitgen | 3523d429733ac8c8915fc749b3af72e92b429530 | [
"MIT"
] | 19 | 2015-03-16T05:35:52.000Z | 2022-03-06T01:37:29.000Z | 8.x/mk/python/test/mktestsupport.py | apnadkarni/kitgen | 3523d429733ac8c8915fc749b3af72e92b429530 | [
"MIT"
] | 4 | 2015-03-13T09:27:24.000Z | 2020-07-27T13:23:38.000Z | 8.x/mk/python/test/mktestsupport.py | apnadkarni/kitgen | 3523d429733ac8c8915fc749b3af72e92b429530 | [
"MIT"
] | 16 | 2015-01-17T20:31:34.000Z | 2022-03-25T07:52:03.000Z | # mktestsupport.py -- Support code used by multiple test modules
# $Id: mktestsupport.py 1230 2007-03-09 15:58:53Z jcw $
# This is part of Metakit, see http://www.equi4.com/metakit/
from test.test_support import TestFailed, verbose
import metakit
import sys
import string
# for overflow testing
MAXINT = sys.maxint
MININT = -MAXINT - 1
MAXLONGLONG = 2**63 - 1
MINLONGLONG = -2**63
MAXULONGLONG = 2**64 - 1
# Check for int/long integration (should fail in Python 2.2, pass in 2.3)
try:
MAXLONGLONG = int(MAXLONGLONG)
int_long_integrated = True
int_long_error = OverflowError
except OverflowError: # long int too large to convert to int
int_long_integrated = False
int_long_error = TypeError
# Check for Unicode support (should fail in Python 1.5.2 or --disable-unicode, pass in 1.6)
try:
UnicodeType = type(unicode(''))
except NameError: # no Unicode support
UnicodeType = None
unicode = None
class Failure:
    """Keeps track of failures as they happen, but doesn't die on the first
    one unless it's unrecoverable. If failure_count > 0 when script
    finishes, raise TestFailed."""
    def __init__(self):
        # Number of failures recorded so far.
        self.failure_count = 0
    def fail(self, op, args, err=None, expected=None, actual=None):
        """Print one FAIL line for operation `op` and bump the counter."""
        print 'FAIL:', op, args
        print '  ',
        if err is not None: print err,
        if actual is not None: print 'got', actual, actual.__class__,
        if expected is not None: print 'expected', expected,
        print
        self.failure_count = self.failure_count + 1
    def assess(self):
        """Raise TestFailed if any failure was recorded."""
        if self.failure_count > 0:
            raise TestFailed(
                '%d failures; run in verbose mode for details' % self.failure_count)
class ViewTester:
    """Inserts rows into a Metakit view and a shadow Python list in parallel,
    so that the view contents can later be verified against the list."""
    def __init__(self, description):
        # `description` is a Metakit layout string, e.g. 'v[a:I,b:S]'.
        self.storage = metakit.storage()
        self.v = self.storage.getas(description)
        self.arr = []
        self.failure = Failure()
        self.fail = self.failure.fail
        # Column names parsed out of the bracketed part of the description.
        self.columns = map(lambda c: string.split(c, ':')[0], string.split(description[string.index(description, '[') + 1:-1], ','))
    def dump_view(self):
        """Print the raw view contents (debug helper)."""
        metakit.dump(self.v, 'VIEW CONTENTS:')
    def checklen(self, args):
        """Abort the whole test if the view and shadow list lengths diverge."""
        alen = len(self.arr)
        vlen = len(self.v)
        if alen != vlen:
            self.fail('append', args, 'view length mismatch',
                      actual=vlen, expected=alen)
            # Best-effort diagnostics; ignore any error while dumping.
            try:
                print 'ARRAY CONTENTS:'
                for arow in self.arr: print arow
                self.dump_view()
            except: pass
            raise TestFailed('unexpected number of rows in view, aborting; run in verbose mode for details')
    def _append(self, args):
        # Record the row in the shadow list (overridden in subclasses).
        self.arr.append(args)
    def insert(self, *args, **kw):
        """Append a row (positional or keyword form) and expect success."""
        if kw:
            if args:
                raise TestFailed("can't have both positional and keyword arguments")
            args = kw
        try:
            self.v.append(args)
            self._append(args)
        except Exception, e:
            self.fail('append', args, actual=e)
        try:
            self.checklen(args)
        except TestFailed:
            raise
        except Exception, e:
            self.fail('append', args, 'spurious', actual=e)
    def reject(self, exception_class=Exception, **args):
        """Append a row and expect it to be rejected with `exception_class`;
        if the append unexpectedly succeeds, undo it."""
        try:
            ix = self.v.append(args)
            self.fail('append', args, 'succeeded', expected=exception_class)
            self.v.delete(ix)
        except Exception, e:
            if isinstance(e, exception_class):
                if verbose:
                    print 'PASS: rejected', args
                    print '  as expected <%s> %s' % (e.__class__, e)
            else:
                self.fail('append', args, expected=exception_class, actual=e)
        try:
            self.checklen(args)
        except TestFailed:
            raise
        except Exception, e:
            self.fail('append', args, 'spurious', actual=e)
    def finished(self):
        """Verify every stored row field-by-field against the shadow list,
        then raise TestFailed if anything failed along the way."""
        if verbose:
            self.dump_view()
        # compare view with array
        for arow, vrow in zip(self.arr, self.v):
            failed = False
            for f in arow.keys():
                try:
                    vf = getattr(vrow, f)
                    af = arow[f]
                    # Fix up Unicode
                    if type(af) == UnicodeType:
                        vf = unicode(vf, 'utf-8')
                    if af == vf:
                        continue
                    # Perform the same implicit coercion as Mk4py should
                    if type(af) != type(vf):
                        try:
                            af = type(vf)(af)
                            if af == vf:
                                continue
                        except:
                            pass
                    # If we get here, we got an exception or the values didn't match
                    # even with coercion
                    failed = True
                    self.fail('%s access' % f, arow, expected=af, actual=vf)
                except Exception, e:
                    failed = True
                    self.fail('%s access' % f, arow, expected=arow[f], actual=e)
            if not failed:
                if verbose:
                    print 'PASS: retrieved', arow
        self.failure.assess()
class HashedViewTester(ViewTester):
    """Inserts rows into hashed view and Python array (where appropriate):
    like ViewTester, but wraps the view in a Metakit hash mapping and
    de-duplicates rows in the shadow list (hashed views reject duplicates)."""
    def __init__(self, description, numkeys):
        ViewTester.__init__(self, description)
        # Auxiliary hash/row map view required by Metakit's .hash().
        hv = self.storage.getas('hv[_H:I,_R:I]')
        self.v = self.v.hash(hv, numkeys)
    def _append(self, args):
        # Normalize positional tuples to {column: value} dicts first.
        if not hasattr(args, 'keys'): # operator module is broken in Python 2.3
            argdict = {}
            for i in range(len(args)):
                argdict[self.columns[i]] = args[i]
            args = argdict
        # Hashed views keep only one row per key, so skip duplicates here too.
        if args not in self.arr:
            self.arr.append(args)
| 34.505747 | 132 | 0.545303 |
acdeac9fc971d273f635dbab92acdca7958aa7a1 | 2,728 | py | Python | scripts/align_and_map.py | JoanaSantosMartins/snappy | 67fc2bc8d4f84086b7ae2e5b4d58192d2d3daee2 | [
"MIT"
] | null | null | null | scripts/align_and_map.py | JoanaSantosMartins/snappy | 67fc2bc8d4f84086b7ae2e5b4d58192d2d3daee2 | [
"MIT"
] | null | null | null | scripts/align_and_map.py | JoanaSantosMartins/snappy | 67fc2bc8d4f84086b7ae2e5b4d58192d2d3daee2 | [
"MIT"
] | 3 | 2019-06-19T12:32:13.000Z | 2022-02-22T12:19:19.000Z | import argparse
import subprocess
# Genomic regions of interest mapped to HXB2 reference coordinates.
# Two values describe a single [start, stop] slice; the six values of
# GAG-POL-ENV describe three [start, stop] slices that get concatenated.
REGIONS = {'GAG':[789, 2292], 'PR':[2252, 2550],
        'RT':[2549,3870], 'PR-RT':[2252,3869],
        'INT':[4229,5096], 'POL':[2252, 5096],
        'ENV':[6224, 8795],
        'GAG-POL-ENV':[789, 2292, 2252, 5096, 6224, 8795] }
def align_and_map(msa, out, gr):
    """Align and Map

    Calls MAFFT to align the single sequence in the file ``msa`` against the
    HIV-1 reference sequence (NCBI id: HXB2) without allowing gaps in the
    reference, trims the aligned sequence to the genomic region ``gr`` and
    writes the result to ``out`` in FASTA format.

    Args:
        msa (str): Path to a FASTA file containing 1 sequence to align.
        out (str): Path of the aligned, trimmed FASTA file to write.
        gr (str): Genomic region of interest (a key of ``REGIONS``),
            specified in the 'config.yaml' file.

    Raises:
        KeyError: If ``gr`` is not a valid genomic region.
        subprocess.CalledProcessError: If MAFFT fails.

    Returns:
        This function does not return a value.
    """
    try:
        this_region = REGIONS[gr]
    except KeyError:
        # Bug fix: the original only printed and fell through, crashing later
        # with NameError on `this_region`; re-raise after the friendly message.
        print(f"Invalid Genomic region in the config file!!!\n(Valid \
inputs: {list(REGIONS.keys())})\n")
        raise
    comand = 'mafft --quiet --add {0} --keeplength ./data/HXB2.fasta'.format(msa)
    pre = subprocess.check_output(comand.split(' '), universal_newlines=True)
    as_list = pre.split('\n')[:-1]
    # MAFFT prints the reference first; the second '>' header line belongs to
    # the query sequence that was added.
    id_place = [x for x in enumerate(as_list) if x[1][0] == '>'][1][0]
    # Bug fix: the header line index was hard-coded as 163 (only valid for
    # one particular HXB2 file); use the located header instead.
    seq_id = as_list[id_place][1:]
    all_seq = ''.join(as_list[int(id_place) + 1:])
    if len(this_region) == 2:
        # Single contiguous region: one [start, stop] slice.
        sequence = all_seq[this_region[0]:this_region[1]]
    elif len(this_region) == 6:
        # GAG-POL-ENV: three [start, stop] slices, concatenated.
        gag_reg = all_seq[this_region[0]:this_region[1]]
        pol_reg = all_seq[this_region[2]:this_region[3]]
        env_reg = all_seq[this_region[4]:this_region[5]]
        sequence = gag_reg + pol_reg + env_reg
    else:
        # Bug fix: previously only printed and then crashed with NameError on
        # `sequence`; fail explicitly instead.
        raise ValueError('ERROR while passing genomic region. Please check the \
integrety of the script: align_and_map.')
    with open(out, 'w') as mapped:
        mapped.write('>{}\n{}\n'.format(seq_id, sequence))
if __name__ == '__main__':
    # Command-line entry point: parse the three required options and run the
    # alignment/trimming pipeline.
    cli = argparse.ArgumentParser()
    for flags, help_text in (
        (("-i", "--input"), "Name input file."),
        (("-o", "--output"), "Name output file."),
        (("-g", "--genomic"), "Name of the genomic region."),
    ):
        cli.add_argument(*flags, required=True, help=help_text)
    options = cli.parse_args()
    align_and_map(options.input, options.output, options.genomic)
| 40.117647 | 81 | 0.632698 |
acdeacb2c9407f32fb0393ab6711f7394f57fbde | 2,152 | py | Python | src/eratosthenes.py | SpyrosDellas/learn-python-the-hard-way | bdbed7658f2650b12f7a1388adf27ad436353f61 | [
"MIT"
] | null | null | null | src/eratosthenes.py | SpyrosDellas/learn-python-the-hard-way | bdbed7658f2650b12f7a1388adf27ad436353f61 | [
"MIT"
] | null | null | null | src/eratosthenes.py | SpyrosDellas/learn-python-the-hard-way | bdbed7658f2650b12f7a1388adf27ad436353f61 | [
"MIT"
] | null | null | null | from time import clock
import numpy as np
# Here we will try to optimize Eratosthenes sieve version 3 from previous
# exercise 'primes.py'.
# Version 3 needs 11.3 seconds for all primes up to 10^9. Our target is to
# do that in about 1 second....
def eratosthenes_v3(x):
    """Sieve of Eratosthenes over odd numbers only (Python 2).

    Index i of the boolean array stands for the odd number 2*i + 1; even
    numbers are never stored and the prime 2 is prepended to the result.
    Returns a numpy array of all primes <= x. Relies on Python 2 integer
    division (`/` on ints).
    """
    correction = x % 2
    length = (x + correction) / 2
    numbers = np.ones(length, dtype = np.bool_)
    numbers[0] = False  # index 0 is the number 1, which is not prime
    last_num = x - 1 + correction
    # Only sieve with odd primes up to sqrt(last_num).
    sq_root = int(last_num ** 0.5)
    position = (sq_root + (sq_root % 2)) / 2 - 1
    for i in xrange(1, position):
        if numbers[i]:
            # p = 2*i + 1; mark p*p, p*p + 2p, ... as composite.
            k1 = 2 * i
            k = k1 + i * i * 2  # index of p*p
            step = k1 + 1       # index step of 2p
            numbers[k: :step] = False
    # Map surviving indices back to the odd numbers they represent.
    return np.r_[2, 2 * numbers.nonzero()[0] + 1]
# This time we are going to use a wheel of (2,3) to improve memory
# usage and speed.
# Version 4 needs 8.75 seconds for all primes up to 10^9.
# Thus version 4 is 20% faster than version 3.
def eratosthenes_v4(x):
    """Sieve of Eratosthenes with a (2,3) wheel (Python 2).

    Only numbers congruent to 1 or 5 (mod 6) are stored: array index i
    represents the number ``(3*i + 1) | 1``, i.e. 3,5,7,11,13,17,19,...
    Returns a numpy array of all primes <= x.
    """
    x = int(x)
    # Small inputs are handled explicitly.
    if x < 2:
        return np.array([])
    elif x == 2:
        return np.array([2])
    elif x < 5:
        return np.array([2,3])
    # Next we create an array representing the following numbers:
    # [3, 5, 7, 11, 13, 17, 19, 23, 25, 29, 31, 35, 37, 41, 43, 47,49,53,55...]
    length = ((x - 1 | 1) - 1) // 3 + 1
    numbers = np.ones(length, dtype = np.bool_)
    numbers[0] = 0  # index 0 is the number 3, handled separately below
    last_num = (length * 3 - 2) | 1
    # Sieve only with primes up to sqrt(last_num); indices come in pairs
    # (i, i+1) representing the 6k-1 / 6k+1 residues.
    sq_root = int(last_num ** 0.5) - 1 | 1
    position = (sq_root - 1) // 3
    for i in xrange(1, position, 2):
        if numbers[i]:
            # p = 3*i + 2; start at index of p*p, strike both residue tracks.
            m = (3 * i + 4) * i + 1
            step = 6 * i + 4
            numbers[m: :step] = False
            numbers[(m + 2 * i + 1): :step] = False
        if numbers[i + 1]:
            # p = 3*i + 4; same scheme for the other residue class.
            m = (3 * i + 8) * i + 5
            step = 6 * i + 8
            numbers[m: :step] = False
            numbers[(m + 4 * i + 5): :step] = False
    # Map surviving indices back to numbers; prepend the wheel primes 2 and 3.
    return np.r_[2, 3, (3 * numbers.nonzero()[0] + 1) | 1]
# Quick timing harness: sieve up to 10^7, show the ten largest primes found
# and the elapsed wall-clock time.
start = clock()
result = eratosthenes_v4(10 ** 7)
print result[-10:]
end = clock()
print "\n", end - start, "\n"
# Second timing slot, kept for comparing other versions.
#start = clock()
#result =
#print result[-10:]
#end = clock()
#print "\n", end - start, "\n"
| 27.589744 | 79 | 0.531134 |
acdeadbac0dded81582b63009474f3e735dfbb33 | 6,691 | py | Python | source/model/callback.py | CostaDiego/diabeticRetinopathy-CExIA | 544cf280249d0545f234101bf1412854ca97a6a4 | [
"MIT"
] | null | null | null | source/model/callback.py | CostaDiego/diabeticRetinopathy-CExIA | 544cf280249d0545f234101bf1412854ca97a6a4 | [
"MIT"
] | null | null | null | source/model/callback.py | CostaDiego/diabeticRetinopathy-CExIA | 544cf280249d0545f234101bf1412854ca97a6a4 | [
"MIT"
] | null | null | null | from keras.callbacks import EarlyStopping
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
def earlyStopping(
    monitor='val_loss',
    min_delta=0.001,
    patience=5,
    verbose=0,
    mode='auto',
    baseline=None,
    restore_best_weights=True
):
    """Build the Keras ``EarlyStopping`` callback used during training.

    Parameters (mirroring ``keras.callbacks.EarlyStopping``):
        monitor: Quantity to be monitored.
        min_delta: Minimum change in the monitored quantity that counts as
            an improvement; smaller changes count as no improvement.
        patience: Number of epochs with no improvement after which training
            is stopped.
        verbose: Verbosity mode.
        mode: One of {'auto', 'min', 'max'}: the direction the monitored
            quantity must move to count as improving; 'auto' infers it from
            the quantity's name.
        baseline: Value the monitored quantity must improve on; training
            stops if the model shows no improvement over it.
        restore_best_weights: Whether to restore the weights from the epoch
            with the best monitored value instead of the last epoch's.

    Returns:
        The configured ``EarlyStopping`` callback, ready to be passed to
        ``model.fit``.
    """
    options = {
        'monitor': monitor,
        'min_delta': min_delta,
        'patience': patience,
        'verbose': verbose,
        'mode': mode,
        'baseline': baseline,
        'restore_best_weights': restore_best_weights,
    }
    return EarlyStopping(**options)
def tensorBoard(
    log_dir='tensorBoard_logs',
    histogram_freq=0,
    write_graph=True,
    write_images=False,
    update_freq='epoch',
    profile_batch=2,
    embeddings_freq=0,
    embeddings_metadata=None,
    **kwargs
):
    """
    Define the TensorBoard callback to be used in training.

    Parameters (mirroring ``keras.callbacks.TensorBoard``):
        log_dir: the path of the directory where to save the log
            files to be parsed by TensorBoard.
        histogram_freq: frequency (in epochs) at which to compute activation
            and weight histograms for the layers of the model. If set to 0,
            histograms won't be computed. Validation data (or split) must be
            specified for histogram visualizations.
        write_graph: whether to visualize the graph in TensorBoard.
            The log file can become quite large when write_graph is set
            to True.
        write_images: whether to write model weights to visualize as
            image in TensorBoard.
        update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
            writes the losses and metrics to TensorBoard after each batch.
            The same applies for `'epoch'`. If using an integer, let's say
            `10000`, the callback will write the metrics and losses to
            TensorBoard every 10000 samples. Note that writing too
            frequently to TensorBoard can slow down your training.
        profile_batch: which batch to profile; set to 0 to disable profiling.
        embeddings_freq: frequency (in epochs) at which selected embedding
            layers will be saved. If set to 0, embeddings won't be computed.
        embeddings_metadata: a dictionary which maps layer name to a file
            name in which metadata for this embedding layer is saved. If the
            same metadata file is used for all embedding layers, a string
            can be passed.
        **kwargs: any further keyword arguments are forwarded verbatim to
            ``TensorBoard``.

    Returns:
        TensorBoard callback: The callback to use TensorBoard.
    """
    tensor_board = TensorBoard(
        log_dir=log_dir,
        histogram_freq=histogram_freq,
        write_graph=write_graph,
        write_images=write_images,
        update_freq=update_freq,
        profile_batch=profile_batch,
        embeddings_freq=embeddings_freq,
        embeddings_metadata=embeddings_metadata,
        **kwargs
    )
    return tensor_board
def model_checkpoint(folder_path, ext = '.h5', monitor = 'val_loss'):
    """
    Return the Model Checkpoint callback.

    The checkpoint filename embeds the epoch number and the monitored
    validation loss, which Keras fills in at save time.

    Parameters:
        folder_path: Folder to save the checkpoint.
        ext: Extension to save the model. Default '.h5'. Optional.
        monitor: quantity to monitor. Default 'val_loss'. Optional.

    Returns:
        model_checkpoint: Model checkpoint callback
    """
    # Bug fix: the original string was not an f-string, so "{folder_path}"
    # and "{ext}" were passed literally to Keras (which then failed trying to
    # format them), and the `ext`/`monitor` parameters were ignored. The
    # doubled braces keep {epoch}/{val_loss} as literal placeholders for
    # Keras to substitute on each save.
    filepath = f"{folder_path}/best_model-epoch_{{epoch:02d}}-ValLoss_{{val_loss:.2f}}{ext}"
    model_checkpoint = ModelCheckpoint(filepath=filepath, monitor=monitor)
    return model_checkpoint
def callbacks(model_checkpoint_folder, early_stopping = True, tensor_board = True):
    """
    Return all the configured callbacks.

    Parameters:
        model_checkpoint_folder: Folder to save the checkpoint; if falsy, no
            checkpoint callback is added.
        early_stopping: If includes the EarlyStopping callback. Optional.
        tensor_board: If includes the Tensorboard callback. Optional.

    Returns:
        callbacks: List of callbacks
    """
    callback_list = []
    if early_stopping:
        callback_list.append(earlyStopping())
    if tensor_board:
        callback_list.append(tensorBoard())
    # Bug fix: the original tested `if model_checkpoint:` — the truthiness of
    # the *function* object, which is always True. Gate on the folder
    # argument actually supplied by the caller instead.
    if model_checkpoint_folder:
        callback_list.append(model_checkpoint(model_checkpoint_folder))
    return callback_list
acdeae405b284bfa850ead7c2cc42a670a7634cd | 511 | py | Python | tests/init/mock.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 4 | 2019-05-13T21:36:31.000Z | 2021-09-06T01:56:36.000Z | tests/init/mock.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
] | 2 | 2020-02-12T14:36:39.000Z | 2020-07-14T11:43:10.000Z | tests/init/mock.py | thusser/spexxy | 14a8d121076b9e043bdf2e27222a65088f771ff9 | [
"MIT"
class MockParameter(object):
    """Minimal stand-in for a fit parameter: fixed name, caller-chosen value."""

    def __init__(self, val):
        # Every mock parameter reports the same generic name.
        self.name = 'parameter'
        self.value = val
class MockComponent(object):
    """Minimal stand-in for a model component that records parameter hints."""

    def __init__(self, name):
        """Create a component whose prefix equals its name, with no hints."""
        self.name = name
        self.prefix = name
        self.param_hints = {}

    @property
    def param_names(self):
        """Names of all parameters for which hints have been set."""
        return self.param_hints.keys()

    def set_param_hint(self, name, **opts):
        """Merge ``opts`` into the hint dict for parameter ``name``."""
        hints = self.param_hints.setdefault(name, {})
        hints.update(**opts)
acdeaec9b266da0496063d120e52f37ed66aeed2 | 138 | py | Python | launchable/utils/file_name_pattern.py | Songmu/cli | f4814e30cedad3feb5e46fd4617238a44717d4bb | [
"Apache-2.0"
] | 1 | 2021-04-18T23:42:58.000Z | 2021-04-18T23:42:58.000Z | launchable/utils/file_name_pattern.py | tchia04/cli | 0290cffba5e4d00532a4ed0a4059a0a4a81f1e9a | [
"Apache-2.0"
] | null | null | null | launchable/utils/file_name_pattern.py | tchia04/cli | 0290cffba5e4d00532a4ed0a4059a0a4a81f1e9a | [
"Apache-2.0"
] | null | null | null | import re
# find *Test, *Tests, *TestCase + .java, .scala, .kt
# i.e. JVM test source file names such as FooTest.java, FooTests.scala,
# or FooTestCase.kt.
jvm_test_pattern = re.compile(r'^.*Test(?:Case|s)?\.(?:java|scala|kt)$')
| 27.6 | 72 | 0.623188 |
acdeaff26ca9011019091600f46fbc4f292672c5 | 24,942 | py | Python | mrrt/operators/linop/linop.py | mritools/mrrt.operators | f0acfd953b0a479798ed38fdf7fee1c46f0e2357 | [
"BSD-3-Clause"
] | null | null | null | mrrt/operators/linop/linop.py | mritools/mrrt.operators | f0acfd953b0a479798ed38fdf7fee1c46f0e2357 | [
"BSD-3-Clause"
] | null | null | null | mrrt/operators/linop/linop.py | mritools/mrrt.operators | f0acfd953b0a479798ed38fdf7fee1c46f0e2357 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, print_function, division
from ._types import integer_types, real_types, allowed_types, complex_types
import numpy as np
import logging
__docformat__ = "restructuredtext"
# Default (null) logger: operators log here unless the caller supplies a
# `logger` keyword, so library users see no output by default.
null_log = logging.getLogger("linop")
null_log.setLevel(logging.INFO)
null_log.addHandler(logging.NullHandler())
class BaseLinearOperator(object):
    """
    A linear operator is a linear mapping x -> A(x) such that the size of the
    input vector x is `nargin` and the size of the output is `nargout`. It can
    be visualized as a matrix of shape (`nargout`, `nargin`). Its type is any
    valid Numpy `dtype`. By default, it has `dtype` `float` but this can
    be changed to, e.g., complex via the `dtype` keyword argument and
    attribute.
    A logger may be attached to the linear operator via the `logger` keyword
    argument.
    """

    def __init__(
        self, nargin, nargout, symmetric=False, hermitian=False, **kwargs
    ):
        self.__nargin = nargin
        self.__nargout = nargout
        self.__symmetric = symmetric
        self.__hermitian = hermitian
        self.__shape = (nargout, nargin)
        # Bug fix: `np.float` was a deprecated alias of the builtin `float`
        # and was removed in NumPy 1.24, making this default raise
        # AttributeError; the builtin is the exact equivalent.
        self.__dtype = kwargs.get("dtype", float)
        self._nMatvec = 0
        # Log activity.
        self.logger = kwargs.get("logger", null_log)
        self.logger.info("New linear operator with shape " + str(self.shape))

    @property
    def nargin(self):
        "The size of an input vector."
        return self.__nargin

    @property
    def nargout(self):
        "The size of an output vector."
        return self.__nargout

    @property
    def symmetric(self):
        "Indicates whether the operator is symmetric."
        return self.__symmetric

    @property
    def hermitian(self):
        "Indicates whether the operator is Hermitian."
        return self.__hermitian

    @property
    def shape(self):
        "The shape of the operator."
        return self.__shape

    @property
    def dtype(self):
        "The data type of the operator."
        return self.__dtype

    @dtype.setter
    def dtype(self, value):
        # Only dtypes white-listed in `allowed_types` may be assigned.
        if value in allowed_types:
            self.__dtype = value
        else:
            raise TypeError("Not a Numpy type")

    @property
    def nMatvec(self):
        "The number of products with vectors computed so far."
        return self._nMatvec

    def reset_counters(self):
        "Reset operator/vector product counter to zero."
        self._nMatvec = 0

    def __call__(self, *args, **kwargs):
        # An alias for __mul__.
        return self.__mul__(*args, **kwargs)

    def __mul__(self, x):
        raise NotImplementedError("Please subclass to implement __mul__.")

    def __repr__(self):
        if self.symmetric:
            s = "Symmetric"
        else:
            s = "Unsymmetric"
        if self.hermitian:
            s += " Hermitian"
        s += " <" + self.__class__.__name__ + ">"
        s += " of type %s" % self.dtype
        s += " with shape (%d,%d)" % (self.nargout, self.nargin)
        return s
class LinearOperator(BaseLinearOperator):
    """
    A linear operator constructed from a `matvec` and (possibly) a
    `matvec_transp` function. If `symmetric` is `True`, `matvec_transp` is
    ignored. All other keyword arguments are passed directly to the superclass.

    The transpose, adjoint and complex conjugate of the operator are
    available as the `T`, `H` and `bar` attributes; whichever of them can
    be inferred from the supplied matvecs is constructed automatically.
    """
    def __init__(
        self,
        nargin,
        nargout,
        matvec,
        matvec_transp=None,
        matvec_adj=None,
        **kwargs,
    ):
        # `transpose_of` / `adjoint_of` / `conjugate_of` are passed in
        # internally when this constructor is invoked recursively, so that
        # e.g. A.T points back to A instead of building a fresh operator.
        super(LinearOperator, self).__init__(nargin, nargout, **kwargs)
        transpose_of = (
            kwargs.pop("transpose_of") if "transpose_of" in kwargs else None
        )
        adjoint_of = (
            kwargs.pop("adjoint_of") if "adjoint_of" in kwargs else None
        )
        conjugate_of = (
            kwargs.pop("conjugate_of") if "conjugate_of" in kwargs else None
        )
        self.__matvec = matvec
        self.__set_transpose(matvec, transpose_of, matvec_transp, **kwargs)
        self.__set_adjoint(matvec, adjoint_of, matvec_adj, **kwargs)
        # For non-complex operators, transpose = adjoint.
        if self.dtype in integer_types + real_types:
            if self.__T is not None and self.__H is None:
                self.__H = self.__T
            elif self.__T is None and self.__H is not None:
                self.__T = self.__H
        else:
            if (
                transpose_of is None
                and adjoint_of is None
                and conjugate_of is None
            ):
                # We're not in a recursive instantiation.
                # Try to infer missing operators.
                # NOTE: the assignments below poke the private (name-mangled)
                # __T / __H slots of *other* LinearOperator instances so that
                # the inferred operators cross-reference each other.
                __conj = self.conjugate()
                if self.T is not None:
                    self.__T.__H = __conj
                if self.H is None and __conj is not None:
                    self.logger.debug("Inferring .H")
                    self.__H = __conj.T
                if self.H is not None:
                    self.__H.__T = __conj
                if self.T is None and __conj is not None:
                    self.logger.debug("Inferring .T")
                    self.__T = __conj.H
    def __set_transpose(
        self, matvec, transpose_of=None, matvec_transp=None, **kwargs
    ):
        # Attach the transpose: `self` for symmetric operators, a recursively
        # built LinearOperator when `matvec_transp` is given, or the
        # explicitly supplied `transpose_of`; otherwise leave it as None.
        self.__T = None
        if self.symmetric:
            self.__T = self
            return
        if transpose_of is None:
            if matvec_transp is not None:
                # Create 'pointer' to transpose operator.
                self.__T = LinearOperator(
                    self.nargout,
                    self.nargin,
                    matvec_transp,
                    matvec_transp=matvec,
                    transpose_of=self,
                    **kwargs,
                )
        else:
            # Use operator supplied as transpose operator.
            if isinstance(transpose_of, BaseLinearOperator):
                self.__T = transpose_of
            else:
                msg = "kwarg transpose_of must be a BaseLinearOperator."
                msg += " Got " + str(transpose_of.__class__)
                raise ValueError(msg)
    def __set_adjoint(self, matvec, adjoint_of=None, matvec_adj=None, **kwargs):
        # Attach the adjoint, mirroring the logic of __set_transpose:
        # `self` for Hermitian operators, a recursively built operator when
        # `matvec_adj` is given, or the explicitly supplied `adjoint_of`.
        self.__H = None
        if self.hermitian:
            self.__H = self
            return
        if adjoint_of is None:
            if matvec_adj is not None:
                # Create 'pointer' to adjoint operator.
                self.__H = LinearOperator(
                    self.nargout,
                    self.nargin,
                    matvec_adj,
                    matvec_adj=matvec,
                    adjoint_of=self,
                    **kwargs,
                )
        else:
            # Use operator supplied as adjoint operator.
            if isinstance(adjoint_of, BaseLinearOperator):
                self.__H = adjoint_of
            else:
                msg = "kwarg adjoint_of must be a BaseLinearOperator."
                msg += " Got " + str(adjoint_of.__class__)
                raise ValueError(msg)
    @property
    def T(self):
        "The transpose operator."
        return self.__T
    @property
    def H(self):
        "The adjoint operator."
        return self.__H
    @property
    def bar(self):
        "The complex conjugate operator."
        return self.conjugate()
    def conjugate(self):
        "Return the complex conjugate operator."
        if self.dtype not in complex_types:
            return self
        # conj(A) * x = conj(A * conj(x))
        def matvec(x):
            if x.dtype not in complex_types:
                return (self * x).conjugate()
            return (self * x.conjugate()).conjugate()
        # conj(A).T * x = A.H * x = conj(A.T * conj(x))
        # conj(A).H * x = A.T * x = conj(A.H * conj(x))
        if self.H is not None:
            matvec_transp = self.H.__matvec
            if self.T is not None:
                matvec_adj = self.T.__matvec
            else:
                def matvec_adj(x):
                    if x.dtype not in complex_types:
                        return (self.H * x).conjugate()
                    return (self.H * x.conjugate()).conjugate()
        elif self.T is not None:
            matvec_adj = self.T.__matvec
            def matvec_transp(x):
                if x.dtype not in complex_types:
                    return (self.T * x).conjugate()
                return (self.T * x.conjugate()).conjugate()
        else:
            # Cannot infer transpose or adjoint of conjugate operator.
            matvec_transp = matvec_adj = None
        return LinearOperator(
            self.nargin,
            self.nargout,
            matvec=matvec,
            matvec_transp=matvec_transp,
            matvec_adj=matvec_adj,
            transpose_of=self.H,
            adjoint_of=self.T,
            conjugate_of=self,
            dtype=self.dtype,
        )
    def to_array(self):
        "Convert operator to a dense matrix. This is the same as `full`."
        # Apply the operator to each canonical basis vector in turn; each
        # product yields one column of the dense matrix.
        n, m = self.shape
        H = np.empty((n, m), dtype=self.dtype)
        e = np.zeros(m, dtype=self.dtype)
        for j in range(m):
            e[j] = 1
            H[:, j] = self * e
            e[j] = 0
        return H
    def full(self):
        "Convert operator to a dense matrix. This is the same as `to_array`."
        return self.to_array()
    def _matvec(self, x):
        """
        Matrix-vector multiplication.
        Encapsulates the matvec routine specified at
        construct time, to ensure the consistency of the input and output
        arrays with the operator's shape.
        """
        x = np.asanyarray(x)
        nargout, nargin = self.shape
        # check input data consistency
        try:
            x = x.reshape(nargin)
        except ValueError:
            msg = "input array size incompatible with operator dimensions"
            raise ValueError(msg)
        y = self.__matvec(x)
        # check output data consistency
        try:
            y = y.reshape(nargout)
        except ValueError:
            msg = "output array size incompatible with operator dimensions"
            raise ValueError(msg)
        return y
    def rmatvec(self, x):
        """
        Product with the conjugate transpose. This method is included for
        compatibility with Scipy only. Please use the `H` attribute instead.
        """
        return self.__H.__mul__(x)
    def matvec(self, x):
        """
        Product. This method is included for compatibility with Scipy only.
        """
        return self.__mul__(x)
    def __mul_scalar(self, x):
        "Product between a linear operator and a scalar."
        result_type = np.result_type(self.dtype, type(x))
        # Multiplying by zero collapses to the (cheaper) zero operator.
        if x == 0:
            return ZeroOperator(self.nargin, self.nargout, dtype=result_type)
        def matvec(y):
            return x * (self(y))
        def matvec_transp(y):
            return x * (self.T(y))
        def matvec_adj(y):
            return x.conjugate() * (self.H(y))
        return LinearOperator(
            self.nargin,
            self.nargout,
            symmetric=self.symmetric,
            hermitian=(result_type not in complex_types) and self.hermitian,
            matvec=matvec,
            matvec_transp=matvec_transp,
            matvec_adj=matvec_adj,
            dtype=result_type,
        )
    def __mul_linop(self, op):
        "Product between two linear operators."
        if self.nargin != op.nargout:
            raise ShapeError("Cannot multiply operators together")
        def matvec(x):
            return self(op(x))
        def matvec_transp(x):
            return op.T(self.T(x))
        def matvec_adj(x):
            return op.H(self.H(x))
        result_type = np.result_type(self.dtype, op.dtype)
        return LinearOperator(
            op.nargin,
            self.nargout,
            symmetric=False,  # Generally.
            hermitian=False,  # Generally.
            matvec=matvec,
            matvec_transp=matvec_transp,
            matvec_adj=matvec_adj,
            dtype=result_type,
        )
    def __mul_vector(self, x):
        "Product between a linear operator and a vector."
        self._nMatvec += 1
        result_type = np.result_type(self.dtype, x.dtype)
        return self._matvec(x).astype(result_type)
    def __mul__(self, x):
        # Dispatch on the type of the right operand: scalar, operator or
        # Numpy vector.
        if np.isscalar(x):
            return self.__mul_scalar(x)
        if isinstance(x, BaseLinearOperator):
            return self.__mul_linop(x)
        if isinstance(x, np.ndarray):
            return self.__mul_vector(x)
        raise ValueError("Cannot multiply")
    def __rmul__(self, x):
        # Right-multiplication: only scalars and operators are supported.
        if np.isscalar(x):
            return self.__mul_scalar(x)
        if isinstance(x, BaseLinearOperator):
            return x.__mul_linop(self)
        raise ValueError("Cannot multiply")
    def __add__(self, other):
        if not isinstance(other, BaseLinearOperator):
            raise ValueError("Cannot add")
        if self.shape != other.shape:
            raise ShapeError("Cannot add")
        def matvec(x):
            return self(x) + other(x)
        def matvec_transp(x):
            return self.T(x) + other.T(x)
        def matvec_adj(x):
            return self.H(x) + other.H(x)
        result_type = np.result_type(self.dtype, other.dtype)
        return LinearOperator(
            self.nargin,
            self.nargout,
            symmetric=self.symmetric and other.symmetric,
            hermitian=self.hermitian and other.hermitian,
            matvec=matvec,
            matvec_transp=matvec_transp,
            matvec_adj=matvec_adj,
            dtype=result_type,
        )
    def __neg__(self):
        return self * (-1)
    def __sub__(self, other):
        if not isinstance(other, BaseLinearOperator):
            raise ValueError("Cannot add")
        if self.shape != other.shape:
            raise ShapeError("Cannot add")
        def matvec(x):
            return self(x) - other(x)
        def matvec_transp(x):
            return self.T(x) - other.T(x)
        def matvec_adj(x):
            return self.H(x) - other.H(x)
        result_type = np.result_type(self.dtype, other.dtype)
        return LinearOperator(
            self.nargin,
            self.nargout,
            symmetric=self.symmetric and other.symmetric,
            hermitian=self.hermitian and other.hermitian,
            matvec=matvec,
            matvec_transp=matvec_transp,
            matvec_adj=matvec_adj,
            dtype=result_type,
        )
    def __div__(self, other):
        # Python 2 division operator; only scalar divisors are meaningful.
        if not np.isscalar(other):
            raise ValueError("Cannot divide")
        return self * (1 / other)
    def __truediv__(self, other):
        if not np.isscalar(other):
            raise ValueError("Cannot divide")
        return self * (1.0 / other)
    def __pow__(self, other):
        # Nonnegative integer powers of square operators only; computed by
        # repeated multiplication.
        if not isinstance(other, int):
            raise ValueError("Can only raise to integer power")
        if other < 0:
            raise ValueError("Can only raise to nonnegative power")
        if self.nargin != self.nargout:
            raise ShapeError("Can only raise square operators to a power")
        if other == 0:
            return IdentityOperator(self.nargin)
        if other == 1:
            return self
        return self * self ** (other - 1)
class IdentityOperator(LinearOperator):
    """The identity operator of order `nargin`: maps every vector to itself."""
    def __init__(self, nargin, **kwargs):
        # These keywords are fixed by the nature of the identity operator;
        # discard caller-supplied values.
        for fixed in ("symmetric", "matvec"):
            kwargs.pop(fixed, None)
        super(IdentityOperator, self).__init__(
            nargin,
            nargin,
            symmetric=True,
            hermitian=True,
            matvec=lambda x: x,
            **kwargs,
        )
class DiagonalOperator(LinearOperator):
    """
    A diagonal linear operator defined by its diagonal `diag` (a Numpy
    array).  The operator's dtype is taken from `diag`, so build the
    diagonal with the desired type, e.g. ``np.ones(5).astype(complex)``.
    """
    def __init__(self, diag, **kwargs):
        # All of these keywords are determined by `diag` itself; silently
        # discard any caller-supplied overrides.
        for fixed in ("symmetric", "hermitian", "matvec", "matvec_adj", "dtype"):
            kwargs.pop(fixed, None)
        diag = np.asarray(diag)
        if diag.ndim != 1:
            raise ValueError("Input must be 1-d array")
        self.__diag = diag.copy()
        super(DiagonalOperator, self).__init__(
            diag.shape[0],
            diag.shape[0],
            symmetric=True,
            hermitian=(diag.dtype not in complex_types),
            matvec=lambda x: diag * x,
            matvec_adj=lambda x: diag.conjugate() * x,
            dtype=diag.dtype,
            **kwargs,
        )
    @property
    def diag(self):
        "A reference to the diagonal of the operator."
        return self.__diag
    def __abs__(self):
        # The elementwise absolute value of a diagonal operator is again
        # diagonal.
        return DiagonalOperator(np.abs(self.__diag))
    def _sqrt(self):
        # A real diagonal with negative entries has no real square root.
        if self.dtype not in complex_types and np.any(self.__diag < 0):
            raise ValueError("Math domain error")
        return DiagonalOperator(np.sqrt(self.__diag))
class ZeroOperator(LinearOperator):
    """
    The zero linear operator of shape `nargout`-by-`nargin`.

    Maps any vector of size `nargin` to the zero vector of size `nargout`.
    """
    def __init__(self, nargin, nargout, **kwargs):
        # All of these keywords are fixed by the definition of the zero
        # operator; silently discard caller-supplied values, consistently
        # with IdentityOperator and DiagonalOperator.  In particular,
        # `matvec_adj` must be stripped: leaving it in `kwargs` would
        # collide with the explicit `matvec_adj=` keyword below.
        for fixed in (
            "symmetric", "hermitian", "matvec", "matvec_transp", "matvec_adj"
        ):
            kwargs.pop(fixed, None)

        def matvec(x):
            "Return the zero vector of size `nargout` (after a shape check)."
            if x.shape != (nargin,):
                msg = "Input has shape " + str(x.shape)
                msg += " instead of (%d,)" % self.nargin
                raise ValueError(msg)
            result_type = np.result_type(self.dtype, x.dtype)
            return np.zeros(nargout, dtype=result_type)

        def matvec_transp(x):
            "Return the zero vector of size `nargin` (after a shape check)."
            if x.shape != (nargout,):
                msg = "Input has shape " + str(x.shape)
                msg += " instead of (%d,)" % self.nargout
                raise ValueError(msg)
            result_type = np.result_type(self.dtype, x.dtype)
            return np.zeros(nargin, dtype=result_type)

        super(ZeroOperator, self).__init__(
            nargin,
            nargout,
            symmetric=(nargin == nargout),
            # A square zero operator equals its conjugate transpose, so it
            # is Hermitian whenever it is symmetric.
            hermitian=(nargin == nargout),
            matvec=matvec,
            matvec_transp=matvec_transp,
            matvec_adj=matvec_transp,
            **kwargs,
        )

    def __abs__(self):
        # |0| = 0.
        return self

    def _sqrt(self):
        # sqrt(0) = 0.
        return self
def ReducedLinearOperator(op, row_indices, col_indices):
    """
    Restrict `op` to the output entries in `row_indices` and the input
    entries in `col_indices`, returning the reduced operator.
    """
    nargin = len(col_indices)
    nargout = len(row_indices)
    m, n = op.shape  # Shape of the full (non-reduced) operator.

    def _embed(x, size, idx):
        "Scatter `x` into a zero vector of length `size` at positions `idx`."
        z = np.zeros(size, dtype=x.dtype)
        z[idx] = x[:]
        return z

    def matvec(x):
        return (op * _embed(x, n, col_indices))[row_indices]

    def matvec_transp(x):
        return (op.T * _embed(x, m, row_indices))[col_indices]

    def matvec_adj(x):
        return (op.H * _embed(x, m, row_indices))[col_indices]

    return LinearOperator(
        nargin,
        nargout,
        symmetric=False,
        hermitian=False,
        matvec=matvec,
        matvec_transp=matvec_transp,
        matvec_adj=matvec_adj,
        dtype=op.dtype,
    )
def SymmetricallyReducedLinearOperator(op, indices):
    """
    Reduce `op` symmetrically: both its input and its output are restricted
    to the entries in `indices`.
    """
    nargin = len(indices)
    m, n = op.shape  # Shape of the full (non-reduced) operator.

    def _embed(x, size):
        "Scatter `x` into a zero vector of length `size` at positions `indices`."
        z = np.zeros(size, dtype=x.dtype)
        z[indices] = x[:]
        return z

    def matvec(x):
        return (op * _embed(x, n))[indices]

    def matvec_transp(x):
        return (op.T * _embed(x, m))[indices]

    def matvec_adj(x):
        return (op.H * _embed(x, m))[indices]

    return LinearOperator(
        nargin,
        nargin,
        symmetric=op.symmetric,
        hermitian=op.hermitian,
        matvec=matvec,
        matvec_transp=matvec_transp,
        matvec_adj=matvec_adj,
        dtype=op.dtype,
    )
class ShapeError(Exception):
    """
    Raised when a linear operator is defined with an inconsistent shape, or
    multiplied with a vector of incompatible shape.
    """
    def __init__(self, value):
        # Keep the offending value so callers can inspect it.
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
def CoordLinearOperator(vals, rows, cols, nargin=0, nargout=0, symmetric=False):
    """
    Return a linear operator from a sparse matrix in coordinate format.

    `vals`, `rows` and `cols` are parallel arrays: entry k of the matrix is
    A[rows[k], cols[k]] = vals[k], with 0-based indices.  If `nargin` or
    `nargout` is not specified, it will be inferred from `rows` and `cols`.
    If `symmetric` is `True`, then `vals`, `rows` and `cols` are assumed to
    only represent one triangle of the operator.
    """
    # Indices are 0-based (they index numpy arrays directly below), so the
    # inferred dimension is the largest index plus one; `max()` alone is off
    # by one and makes the matvecs index out of bounds for entries in the
    # last row or column.
    if nargin == 0:
        nargin = cols.max() + 1
    if nargout == 0:
        nargout = rows.max() + 1

    def matvec(x):
        "Compute y = A * x by accumulating one coordinate entry at a time."
        if x.shape != (nargin,):
            msg = "Input has shape " + str(x.shape)
            msg += " instead of (%d,)" % nargin
            raise ShapeError(msg)
        result_type = np.result_type(x.dtype, vals.dtype)
        y = np.zeros(nargout, dtype=result_type)
        for k in range(len(vals)):
            row = rows[k]
            col = cols[k]
            val = vals[k]
            y[row] += val * x[col]
            # Only one triangle is stored: mirror off-diagonal entries.
            if symmetric and row != col:
                y[col] += val * x[row]
        return y

    def matvec_transp(y):
        "Compute x = A.T * y by accumulating one coordinate entry at a time."
        if y.shape != (nargout,):
            msg = "Input has shape " + str(y.shape)
            msg += " instead of (%d,)" % nargout
            raise ShapeError(msg)
        result_type = np.result_type(y.dtype, vals.dtype)
        x = np.zeros(nargin, dtype=result_type)
        for k in range(len(vals)):
            row = rows[k]
            col = cols[k]
            val = vals[k]
            x[col] += val * y[row]
            if symmetric and row != col:
                x[row] += val * y[col]
        return x

    return LinearOperator(
        nargin,
        nargout,
        matvec=matvec,
        matvec_transp=matvec_transp,
        symmetric=symmetric,
        dtype=vals.dtype,
    )
def PysparseLinearOperator(A):
    "Return a linear operator wrapping the Pysparse sparse matrix `A`."
    nargout, nargin = A.shape
    # Symmetry is exposed either as the `issym` attribute or via the
    # `isSymmetric()` method, depending on the matrix class.
    try:
        symmetric = A.issym
    except AttributeError:
        symmetric = A.isSymmetric()

    def matvec(x):
        "Compute A * x."
        if x.shape != (nargin,):
            raise ShapeError(
                "Input has shape " + str(x.shape) + " instead of (%d,)" % nargin
            )
        # Prefer `A * x` when supported; otherwise fall back to the
        # in-place `matvec(x, out)` interface.
        if hasattr(A, "__mul__"):
            return A * x
        Ax = np.empty(nargout)
        A.matvec(x, Ax)
        return Ax

    def matvec_transp(y):
        "Compute A.T * y."
        if y.shape != (nargout,):
            raise ShapeError(
                "Input has shape " + str(y.shape) + " instead of (%d,)" % nargout
            )
        # Prefer `y * A` when supported; otherwise fall back to the
        # in-place `matvec_transp(y, out)` interface.
        if hasattr(A, "__rmul__"):
            return y * A
        ATy = np.empty(nargin)
        A.matvec_transp(y, ATy)
        return ATy

    return LinearOperator(
        nargin,
        nargout,
        matvec=matvec,
        matvec_transp=matvec_transp,
        symmetric=symmetric,
    )
def linop_from_ndarray(A, symmetric=False, **kwargs):
    "Return a linear operator from a Numpy `ndarray`."
    hermitian = kwargs.get("hermitian", symmetric)
    nargout, nargin = A.shape[0], A.shape[1]
    if A.dtype in complex_types:
        return LinearOperator(
            nargin,
            nargout,
            lambda v: np.dot(A, v),
            matvec_transp=lambda u: np.dot(A.T, u),
            matvec_adj=lambda w: np.dot(A.conjugate().T, w),
            symmetric=symmetric,
            hermitian=hermitian,
            dtype=A.dtype,
        )
    # Real operators: transpose and adjoint coincide, so the two flags must
    # agree.
    if symmetric ^ hermitian:
        raise ValueError("For non-complex operators, transpose = adjoint.")
    flag = symmetric or hermitian
    return LinearOperator(
        nargin,
        nargout,
        lambda v: np.dot(A, v),
        matvec_transp=lambda u: np.dot(A.T, u),
        symmetric=flag,
        hermitian=flag,
        dtype=A.dtype,
    )
def sqrt(op):
    """
    Return the square root of a linear operator, if defined.

    Note that this is not the elementwise square root: the result is a
    linear operator that, when composed with itself, yields the original
    operator.  Delegates to the operator's `_sqrt` hook.
    """
    return op._sqrt()
| 29.51716 | 80 | 0.55585 |
acdeaffa5462e27d2ed24411993832a9925360a7 | 521 | py | Python | coursework1/task8/combiner.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
] | null | null | null | coursework1/task8/combiner.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
] | null | null | null | coursework1/task8/combiner.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys
prev_sid = ""
cntcourse = 0
totalmark = 0
for line in sys.stdin:
sid, info = line.strip().split("\t",1)
scount,smark = info.split(",")
if sid != prev_sid:
if prev_sid != "":
print "%s\t%d,%d"%(prev_sid,cntcourse,totalmark)
prev_sid = sid
cntcourse = int(scount)
totalmark = int(smark)
else:
cntcourse += int(scount)
totalmark += int(smark)
if prev_sid != "":
print "%s\t%d,%d"%(prev_sid,cntcourse,totalmark)
| 21.708333 | 60 | 0.570058 |
acdeb030f724697dcb1e193e5b7fb8bba9394bbd | 90,051 | py | Python | lib/matplotlib/_mathtext_data.py | adrn/matplotlib | 7a9f2347a3b1c1efaef6e547930661a547c20ef2 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | lib/matplotlib/_mathtext_data.py | adrn/matplotlib | 7a9f2347a3b1c1efaef6e547930661a547c20ef2 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | lib/matplotlib/_mathtext_data.py | adrn/matplotlib | 7a9f2347a3b1c1efaef6e547930661a547c20ef2 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """
font data tables for truetype and afm computer modern fonts
"""
# this dict maps symbol names to fontnames, glyphindex. To get the
# glyph index from the character code, you have to use get_charmap
from __future__ import absolute_import, division, print_function, unicode_literals
import six
"""
from matplotlib.ft2font import FT2Font
font = FT2Font('/usr/local/share/matplotlib/cmr10.ttf')
items = font.get_charmap().items()
items.sort()
for charcode, glyphind in items:
print charcode, glyphind
"""
latex_to_bakoma = {
r'\oint' : ('cmex10', 45),
r'\bigodot' : ('cmex10', 50),
r'\bigoplus' : ('cmex10', 55),
r'\bigotimes' : ('cmex10', 59),
r'\sum' : ('cmex10', 51),
r'\prod' : ('cmex10', 24),
r'\int' : ('cmex10', 56),
r'\bigcup' : ('cmex10', 28),
r'\bigcap' : ('cmex10', 60),
r'\biguplus' : ('cmex10', 32),
r'\bigwedge' : ('cmex10', 4),
r'\bigvee' : ('cmex10', 37),
r'\coprod' : ('cmex10', 42),
r'\__sqrt__' : ('cmex10', 48),
r'\leftbrace' : ('cmex10', 92),
r'{' : ('cmex10', 92),
r'\{' : ('cmex10', 92),
r'\rightbrace' : ('cmex10', 130),
r'}' : ('cmex10', 130),
r'\}' : ('cmex10', 130),
r'\leftangle' : ('cmex10', 97),
r'\rightangle' : ('cmex10', 64),
r'\langle' : ('cmex10', 97),
r'\rangle' : ('cmex10', 64),
r'\widehat' : ('cmex10', 15),
r'\widetilde' : ('cmex10', 52),
r'\widebar' : ('cmr10', 131),
r'\omega' : ('cmmi10', 29),
r'\varepsilon' : ('cmmi10', 20),
r'\vartheta' : ('cmmi10', 22),
r'\varrho' : ('cmmi10', 61),
r'\varsigma' : ('cmmi10', 41),
r'\varphi' : ('cmmi10', 6),
r'\leftharpoonup' : ('cmmi10', 108),
r'\leftharpoondown' : ('cmmi10', 68),
r'\rightharpoonup' : ('cmmi10', 117),
r'\rightharpoondown' : ('cmmi10', 77),
r'\triangleright' : ('cmmi10', 130),
r'\triangleleft' : ('cmmi10', 89),
r'.' : ('cmmi10', 51),
r',' : ('cmmi10', 44),
r'<' : ('cmmi10', 99),
r'/' : ('cmmi10', 98),
r'>' : ('cmmi10', 107),
r'\flat' : ('cmmi10', 131),
r'\natural' : ('cmmi10', 90),
r'\sharp' : ('cmmi10', 50),
r'\smile' : ('cmmi10', 97),
r'\frown' : ('cmmi10', 58),
r'\ell' : ('cmmi10', 102),
r'\imath' : ('cmmi10', 8),
r'\jmath' : ('cmmi10', 65),
r'\wp' : ('cmmi10', 14),
r'\alpha' : ('cmmi10', 13),
r'\beta' : ('cmmi10', 35),
r'\gamma' : ('cmmi10', 24),
r'\delta' : ('cmmi10', 38),
r'\epsilon' : ('cmmi10', 54),
r'\zeta' : ('cmmi10', 10),
r'\eta' : ('cmmi10', 5),
r'\theta' : ('cmmi10', 18),
r'\iota' : ('cmmi10', 28),
r'\lambda' : ('cmmi10', 9),
r'\mu' : ('cmmi10', 32),
r'\nu' : ('cmmi10', 34),
r'\xi' : ('cmmi10', 7),
r'\pi' : ('cmmi10', 36),
r'\kappa' : ('cmmi10', 30),
r'\rho' : ('cmmi10', 39),
r'\sigma' : ('cmmi10', 21),
r'\tau' : ('cmmi10', 43),
'\\upsilon' : ('cmmi10', 25),
r'\phi' : ('cmmi10', 42),
r'\chi' : ('cmmi10', 17),
r'\psi' : ('cmmi10', 31),
r'|' : ('cmsy10', 47),
r'\|' : ('cmsy10', 47),
r'(' : ('cmr10', 119),
r'\leftparen' : ('cmr10', 119),
r'\rightparen' : ('cmr10', 68),
r')' : ('cmr10', 68),
r'+' : ('cmr10', 76),
r'0' : ('cmr10', 40),
r'1' : ('cmr10', 100),
r'2' : ('cmr10', 49),
r'3' : ('cmr10', 110),
r'4' : ('cmr10', 59),
r'5' : ('cmr10', 120),
r'6' : ('cmr10', 69),
r'7' : ('cmr10', 127),
r'8' : ('cmr10', 77),
r'9' : ('cmr10', 22),
r':' : ('cmr10', 85),
r';' : ('cmr10', 31),
r'=' : ('cmr10', 41),
r'\leftbracket' : ('cmr10', 62),
r'[' : ('cmr10', 62),
r'\rightbracket' : ('cmr10', 72),
r']' : ('cmr10', 72),
r'\%' : ('cmr10', 48),
r'%' : ('cmr10', 48),
r'\$' : ('cmr10', 99),
r'@' : ('cmr10', 111),
r'\#' : ('cmr10', 39),
r'\_' : ('cmtt10', 79),
r'\Gamma' : ('cmr10', 19),
r'\Delta' : ('cmr10', 6),
r'\Theta' : ('cmr10', 7),
r'\Lambda' : ('cmr10', 14),
r'\Xi' : ('cmr10', 3),
r'\Pi' : ('cmr10', 17),
r'\Sigma' : ('cmr10', 10),
'\\Upsilon' : ('cmr10', 11),
r'\Phi' : ('cmr10', 9),
r'\Psi' : ('cmr10', 15),
r'\Omega' : ('cmr10', 12),
r'\prime' : ('cmsy10', 73),
# these are mathml names, I think. I'm just using them for the
# tex methods noted
r'\circumflexaccent' : ('cmr10', 124), # for \hat
r'\combiningbreve' : ('cmr10', 81), # for \breve
r'\combiningoverline' : ('cmr10', 131), # for \bar
r'\combininggraveaccent' : ('cmr10', 114), # for \grave
r'\combiningacuteaccent' : ('cmr10', 63), # for \accute
r'\combiningdiaeresis' : ('cmr10', 91), # for \ddot
r'\combiningtilde' : ('cmr10', 75), # for \tilde
r'\combiningrightarrowabove' : ('cmmi10', 110), # for \vec
r'\combiningdotabove' : ('cmr10', 26), # for \dot
r'\leftarrow' : ('cmsy10', 10),
'\\uparrow' : ('cmsy10', 25),
r'\downarrow' : ('cmsy10', 28),
r'\leftrightarrow' : ('cmsy10', 24),
r'\nearrow' : ('cmsy10', 99),
r'\searrow' : ('cmsy10', 57),
r'\simeq' : ('cmsy10', 108),
r'\Leftarrow' : ('cmsy10', 104),
r'\Rightarrow' : ('cmsy10', 112),
'\\Uparrow' : ('cmsy10', 60),
r'\Downarrow' : ('cmsy10', 68),
r'\Leftrightarrow' : ('cmsy10', 51),
r'\nwarrow' : ('cmsy10', 65),
r'\swarrow' : ('cmsy10', 116),
r'\propto' : ('cmsy10', 15),
r'\infty' : ('cmsy10', 32),
r'\in' : ('cmsy10', 59),
r'\ni' : ('cmsy10', 122),
r'\bigtriangleup' : ('cmsy10', 80),
r'\bigtriangledown' : ('cmsy10', 132),
r'\slash' : ('cmsy10', 87),
r'\forall' : ('cmsy10', 21),
r'\exists' : ('cmsy10', 5),
r'\neg' : ('cmsy10', 20),
r'\emptyset' : ('cmsy10', 33),
r'\Re' : ('cmsy10', 95),
r'\Im' : ('cmsy10', 52),
r'\top' : ('cmsy10', 100),
r'\bot' : ('cmsy10', 11),
r'\aleph' : ('cmsy10', 26),
r'\cup' : ('cmsy10', 6),
r'\cap' : ('cmsy10', 19),
'\\uplus' : ('cmsy10', 58),
r'\wedge' : ('cmsy10', 43),
r'\vee' : ('cmsy10', 96),
r'\vdash' : ('cmsy10', 109),
r'\dashv' : ('cmsy10', 66),
r'\lfloor' : ('cmsy10', 117),
r'\rfloor' : ('cmsy10', 74),
r'\lceil' : ('cmsy10', 123),
r'\rceil' : ('cmsy10', 81),
r'\lbrace' : ('cmsy10', 92),
r'\rbrace' : ('cmsy10', 105),
r'\mid' : ('cmsy10', 47),
r'\vert' : ('cmsy10', 47),
r'\Vert' : ('cmsy10', 44),
'\\updownarrow' : ('cmsy10', 94),
'\\Updownarrow' : ('cmsy10', 53),
r'\backslash' : ('cmsy10', 126),
r'\wr' : ('cmsy10', 101),
r'\nabla' : ('cmsy10', 110),
r'\sqcup' : ('cmsy10', 67),
r'\sqcap' : ('cmsy10', 118),
r'\sqsubseteq' : ('cmsy10', 75),
r'\sqsupseteq' : ('cmsy10', 124),
r'\S' : ('cmsy10', 129),
r'\dag' : ('cmsy10', 71),
r'\ddag' : ('cmsy10', 127),
r'\P' : ('cmsy10', 130),
r'\clubsuit' : ('cmsy10', 18),
r'\diamondsuit' : ('cmsy10', 34),
r'\heartsuit' : ('cmsy10', 22),
r'-' : ('cmsy10', 17),
r'\cdot' : ('cmsy10', 78),
r'\times' : ('cmsy10', 13),
r'*' : ('cmsy10', 9),
r'\ast' : ('cmsy10', 9),
r'\div' : ('cmsy10', 31),
r'\diamond' : ('cmsy10', 48),
r'\pm' : ('cmsy10', 8),
r'\mp' : ('cmsy10', 98),
r'\oplus' : ('cmsy10', 16),
r'\ominus' : ('cmsy10', 56),
r'\otimes' : ('cmsy10', 30),
r'\oslash' : ('cmsy10', 107),
r'\odot' : ('cmsy10', 64),
r'\bigcirc' : ('cmsy10', 115),
r'\circ' : ('cmsy10', 72),
r'\bullet' : ('cmsy10', 84),
r'\asymp' : ('cmsy10', 121),
r'\equiv' : ('cmsy10', 35),
r'\subseteq' : ('cmsy10', 103),
r'\supseteq' : ('cmsy10', 42),
r'\leq' : ('cmsy10', 14),
r'\geq' : ('cmsy10', 29),
r'\preceq' : ('cmsy10', 79),
r'\succeq' : ('cmsy10', 131),
r'\sim' : ('cmsy10', 27),
r'\approx' : ('cmsy10', 23),
r'\subset' : ('cmsy10', 50),
r'\supset' : ('cmsy10', 86),
r'\ll' : ('cmsy10', 85),
r'\gg' : ('cmsy10', 40),
r'\prec' : ('cmsy10', 93),
r'\succ' : ('cmsy10', 49),
r'\rightarrow' : ('cmsy10', 12),
r'\to' : ('cmsy10', 12),
r'\spadesuit' : ('cmsy10', 7),
r'?' : ('cmr10', 50),
r'!' : ('cmr10', 29),
r'&' : ('cmr10', 109)
}
# Map a subset of TeX math command names to character codes in the cmex10
# (Computer Modern math extension) font.
latex_to_cmex = {
    r'\__sqrt__'   : 112,
    r'\bigcap'     : 92,
    r'\bigcup'     : 91,
    r'\bigodot'    : 75,
    r'\bigoplus'   : 77,
    r'\bigotimes'  : 79,
    r'\biguplus'   : 93,
    r'\bigvee'     : 95,
    r'\bigwedge'   : 94,
    r'\coprod'     : 97,
    r'\int'        : 90,
    r'\leftangle'  : 173,
    r'\leftbrace'  : 169,
    r'\oint'       : 73,
    r'\prod'       : 89,
    r'\rightangle' : 174,
    r'\rightbrace' : 170,
    r'\sum'        : 88,
    r'\widehat'    : 98,
    r'\widetilde'  : 101,
}
latex_to_standard = {
r'\cong' : ('psyr', 64),
r'\Delta' : ('psyr', 68),
r'\Phi' : ('psyr', 70),
r'\Gamma' : ('psyr', 89),
r'\alpha' : ('psyr', 97),
r'\beta' : ('psyr', 98),
r'\chi' : ('psyr', 99),
r'\delta' : ('psyr', 100),
r'\varepsilon' : ('psyr', 101),
r'\phi' : ('psyr', 102),
r'\gamma' : ('psyr', 103),
r'\eta' : ('psyr', 104),
r'\iota' : ('psyr', 105),
r'\varpsi' : ('psyr', 106),
r'\kappa' : ('psyr', 108),
r'\nu' : ('psyr', 110),
r'\pi' : ('psyr', 112),
r'\theta' : ('psyr', 113),
r'\rho' : ('psyr', 114),
r'\sigma' : ('psyr', 115),
r'\tau' : ('psyr', 116),
'\\upsilon' : ('psyr', 117),
r'\varpi' : ('psyr', 118),
r'\omega' : ('psyr', 119),
r'\xi' : ('psyr', 120),
r'\psi' : ('psyr', 121),
r'\zeta' : ('psyr', 122),
r'\sim' : ('psyr', 126),
r'\leq' : ('psyr', 163),
r'\infty' : ('psyr', 165),
r'\clubsuit' : ('psyr', 167),
r'\diamondsuit' : ('psyr', 168),
r'\heartsuit' : ('psyr', 169),
r'\spadesuit' : ('psyr', 170),
r'\leftrightarrow' : ('psyr', 171),
r'\leftarrow' : ('psyr', 172),
'\\uparrow' : ('psyr', 173),
r'\rightarrow' : ('psyr', 174),
r'\downarrow' : ('psyr', 175),
r'\pm' : ('psyr', 176),
r'\geq' : ('psyr', 179),
r'\times' : ('psyr', 180),
r'\propto' : ('psyr', 181),
r'\partial' : ('psyr', 182),
r'\bullet' : ('psyr', 183),
r'\div' : ('psyr', 184),
r'\neq' : ('psyr', 185),
r'\equiv' : ('psyr', 186),
r'\approx' : ('psyr', 187),
r'\ldots' : ('psyr', 188),
r'\aleph' : ('psyr', 192),
r'\Im' : ('psyr', 193),
r'\Re' : ('psyr', 194),
r'\wp' : ('psyr', 195),
r'\otimes' : ('psyr', 196),
r'\oplus' : ('psyr', 197),
r'\oslash' : ('psyr', 198),
r'\cap' : ('psyr', 199),
r'\cup' : ('psyr', 200),
r'\supset' : ('psyr', 201),
r'\supseteq' : ('psyr', 202),
r'\subset' : ('psyr', 204),
r'\subseteq' : ('psyr', 205),
r'\in' : ('psyr', 206),
r'\notin' : ('psyr', 207),
r'\angle' : ('psyr', 208),
r'\nabla' : ('psyr', 209),
r'\textregistered' : ('psyr', 210),
r'\copyright' : ('psyr', 211),
r'\texttrademark' : ('psyr', 212),
r'\Pi' : ('psyr', 213),
r'\prod' : ('psyr', 213),
r'\surd' : ('psyr', 214),
r'\__sqrt__' : ('psyr', 214),
r'\cdot' : ('psyr', 215),
'\\urcorner' : ('psyr', 216),
r'\vee' : ('psyr', 217),
r'\wedge' : ('psyr', 218),
r'\Leftrightarrow' : ('psyr', 219),
r'\Leftarrow' : ('psyr', 220),
'\\Uparrow' : ('psyr', 221),
r'\Rightarrow' : ('psyr', 222),
r'\Downarrow' : ('psyr', 223),
r'\Diamond' : ('psyr', 224),
r'\langle' : ('psyr', 225),
r'\Sigma' : ('psyr', 229),
r'\sum' : ('psyr', 229),
r'\forall' : ('psyr', 34),
r'\exists' : ('psyr', 36),
r'\lceil' : ('psyr', 233),
r'\lbrace' : ('psyr', 123),
r'\Psi' : ('psyr', 89),
r'\bot' : ('psyr', 0o136),
r'\Omega' : ('psyr', 0o127),
r'\leftbracket' : ('psyr', 0o133),
r'\rightbracket' : ('psyr', 0o135),
r'\leftbrace' : ('psyr', 123),
r'\leftparen' : ('psyr', 0o50),
r'\prime' : ('psyr', 0o242),
r'\sharp' : ('psyr', 0o43),
r'\slash' : ('psyr', 0o57),
r'\Lamda' : ('psyr', 0o114),
r'\neg' : ('psyr', 0o330),
'\\Upsilon' : ('psyr', 0o241),
r'\rightbrace' : ('psyr', 0o175),
r'\rfloor' : ('psyr', 0o373),
r'\lambda' : ('psyr', 0o154),
r'\to' : ('psyr', 0o256),
r'\Xi' : ('psyr', 0o130),
r'\emptyset' : ('psyr', 0o306),
r'\lfloor' : ('psyr', 0o353),
r'\rightparen' : ('psyr', 0o51),
r'\rceil' : ('psyr', 0o371),
r'\ni' : ('psyr', 0o47),
r'\epsilon' : ('psyr', 0o145),
r'\Theta' : ('psyr', 0o121),
r'\langle' : ('psyr', 0o341),
r'\leftangle' : ('psyr', 0o341),
r'\rangle' : ('psyr', 0o361),
r'\rightangle' : ('psyr', 0o361),
r'\rbrace' : ('psyr', 0o175),
r'\circ' : ('psyr', 0o260),
r'\diamond' : ('psyr', 0o340),
r'\mu' : ('psyr', 0o155),
r'\mid' : ('psyr', 0o352),
r'\imath' : ('pncri8a', 105),
r'\%' : ('pncr8a', 37),
r'\$' : ('pncr8a', 36),
r'\{' : ('pncr8a', 123),
r'\}' : ('pncr8a', 125),
r'\backslash' : ('pncr8a', 92),
r'\ast' : ('pncr8a', 42),
r'\#' : ('pncr8a', 35),
r'\circumflexaccent' : ('pncri8a', 124), # for \hat
r'\combiningbreve' : ('pncri8a', 81), # for \breve
r'\combininggraveaccent' : ('pncri8a', 114), # for \grave
r'\combiningacuteaccent' : ('pncri8a', 63), # for \accute
r'\combiningdiaeresis' : ('pncri8a', 91), # for \ddot
r'\combiningtilde' : ('pncri8a', 75), # for \tilde
r'\combiningrightarrowabove' : ('pncri8a', 110), # for \vec
r'\combiningdotabove' : ('pncri8a', 26), # for \dot
}
# Automatically generated.
type12uni = {
'uni24C8' : 9416,
'aring' : 229,
'uni22A0' : 8864,
'uni2292' : 8850,
'quotedblright' : 8221,
'uni03D2' : 978,
'uni2215' : 8725,
'uni03D0' : 976,
'V' : 86,
'dollar' : 36,
'uni301E' : 12318,
'uni03D5' : 981,
'four' : 52,
'uni25A0' : 9632,
'uni013C' : 316,
'uni013B' : 315,
'uni013E' : 318,
'Yacute' : 221,
'uni25DE' : 9694,
'uni013F' : 319,
'uni255A' : 9562,
'uni2606' : 9734,
'uni0180' : 384,
'uni22B7' : 8887,
'uni044F' : 1103,
'uni22B5' : 8885,
'uni22B4' : 8884,
'uni22AE' : 8878,
'uni22B2' : 8882,
'uni22B1' : 8881,
'uni22B0' : 8880,
'uni25CD' : 9677,
'uni03CE' : 974,
'uni03CD' : 973,
'uni03CC' : 972,
'uni03CB' : 971,
'uni03CA' : 970,
'uni22B8' : 8888,
'uni22C9' : 8905,
'uni0449' : 1097,
'uni20DD' : 8413,
'uni20DC' : 8412,
'uni20DB' : 8411,
'uni2231' : 8753,
'uni25CF' : 9679,
'uni306E' : 12398,
'uni03D1' : 977,
'uni01A1' : 417,
'uni20D7' : 8407,
'uni03D6' : 982,
'uni2233' : 8755,
'uni20D2' : 8402,
'uni20D1' : 8401,
'uni20D0' : 8400,
'P' : 80,
'uni22BE' : 8894,
'uni22BD' : 8893,
'uni22BC' : 8892,
'uni22BB' : 8891,
'underscore' : 95,
'uni03C8' : 968,
'uni03C7' : 967,
'uni0328' : 808,
'uni03C5' : 965,
'uni03C4' : 964,
'uni03C3' : 963,
'uni03C2' : 962,
'uni03C1' : 961,
'uni03C0' : 960,
'uni2010' : 8208,
'uni0130' : 304,
'uni0133' : 307,
'uni0132' : 306,
'uni0135' : 309,
'uni0134' : 308,
'uni0137' : 311,
'uni0136' : 310,
'uni0139' : 313,
'uni0138' : 312,
'uni2244' : 8772,
'uni229A' : 8858,
'uni2571' : 9585,
'uni0278' : 632,
'uni2239' : 8761,
'p' : 112,
'uni3019' : 12313,
'uni25CB' : 9675,
'uni03DB' : 987,
'uni03DC' : 988,
'uni03DA' : 986,
'uni03DF' : 991,
'uni03DD' : 989,
'uni013D' : 317,
'uni220A' : 8714,
'uni220C' : 8716,
'uni220B' : 8715,
'uni220E' : 8718,
'uni220D' : 8717,
'uni220F' : 8719,
'uni22CC' : 8908,
'Otilde' : 213,
'uni25E5' : 9701,
'uni2736' : 10038,
'perthousand' : 8240,
'zero' : 48,
'uni279B' : 10139,
'dotlessi' : 305,
'uni2279' : 8825,
'Scaron' : 352,
'zcaron' : 382,
'uni21D8' : 8664,
'egrave' : 232,
'uni0271' : 625,
'uni01AA' : 426,
'uni2332' : 9010,
'section' : 167,
'uni25E4' : 9700,
'Icircumflex' : 206,
'ntilde' : 241,
'uni041E' : 1054,
'ampersand' : 38,
'uni041C' : 1052,
'uni041A' : 1050,
'uni22AB' : 8875,
'uni21DB' : 8667,
'dotaccent' : 729,
'uni0416' : 1046,
'uni0417' : 1047,
'uni0414' : 1044,
'uni0415' : 1045,
'uni0412' : 1042,
'uni0413' : 1043,
'degree' : 176,
'uni0411' : 1041,
'K' : 75,
'uni25EB' : 9707,
'uni25EF' : 9711,
'uni0418' : 1048,
'uni0419' : 1049,
'uni2263' : 8803,
'uni226E' : 8814,
'uni2251' : 8785,
'uni02C8' : 712,
'uni2262' : 8802,
'acircumflex' : 226,
'uni22B3' : 8883,
'uni2261' : 8801,
'uni2394' : 9108,
'Aring' : 197,
'uni2260' : 8800,
'uni2254' : 8788,
'uni0436' : 1078,
'uni2267' : 8807,
'k' : 107,
'uni22C8' : 8904,
'uni226A' : 8810,
'uni231F' : 8991,
'smalltilde' : 732,
'uni2201' : 8705,
'uni2200' : 8704,
'uni2203' : 8707,
'uni02BD' : 701,
'uni2205' : 8709,
'uni2204' : 8708,
'Agrave' : 192,
'uni2206' : 8710,
'uni2209' : 8713,
'uni2208' : 8712,
'uni226D' : 8813,
'uni2264' : 8804,
'uni263D' : 9789,
'uni2258' : 8792,
'uni02D3' : 723,
'uni02D2' : 722,
'uni02D1' : 721,
'uni02D0' : 720,
'uni25E1' : 9697,
'divide' : 247,
'uni02D5' : 725,
'uni02D4' : 724,
'ocircumflex' : 244,
'uni2524' : 9508,
'uni043A' : 1082,
'uni24CC' : 9420,
'asciitilde' : 126,
'uni22B9' : 8889,
'uni24D2' : 9426,
'uni211E' : 8478,
'uni211D' : 8477,
'uni24DD' : 9437,
'uni211A' : 8474,
'uni211C' : 8476,
'uni211B' : 8475,
'uni25C6' : 9670,
'uni017F' : 383,
'uni017A' : 378,
'uni017C' : 380,
'uni017B' : 379,
'uni0346' : 838,
'uni22F1' : 8945,
'uni22F0' : 8944,
'two' : 50,
'uni2298' : 8856,
'uni24D1' : 9425,
'E' : 69,
'uni025D' : 605,
'scaron' : 353,
'uni2322' : 8994,
'uni25E3' : 9699,
'uni22BF' : 8895,
'F' : 70,
'uni0440' : 1088,
'uni255E' : 9566,
'uni22BA' : 8890,
'uni0175' : 373,
'uni0174' : 372,
'uni0177' : 375,
'uni0176' : 374,
'bracketleft' : 91,
'uni0170' : 368,
'uni0173' : 371,
'uni0172' : 370,
'asciicircum' : 94,
'uni0179' : 377,
'uni2590' : 9616,
'uni25E2' : 9698,
'uni2119' : 8473,
'uni2118' : 8472,
'uni25CC' : 9676,
'f' : 102,
'ordmasculine' : 186,
'uni229B' : 8859,
'uni22A1' : 8865,
'uni2111' : 8465,
'uni2110' : 8464,
'uni2113' : 8467,
'uni2112' : 8466,
'mu' : 181,
'uni2281' : 8833,
'paragraph' : 182,
'nine' : 57,
'uni25EC' : 9708,
'v' : 118,
'uni040C' : 1036,
'uni0113' : 275,
'uni22D0' : 8912,
'uni21CC' : 8652,
'uni21CB' : 8651,
'uni21CA' : 8650,
'uni22A5' : 8869,
'uni21CF' : 8655,
'uni21CE' : 8654,
'uni21CD' : 8653,
'guilsinglleft' : 8249,
'backslash' : 92,
'uni2284' : 8836,
'uni224E' : 8782,
'uni224D' : 8781,
'uni224F' : 8783,
'uni224A' : 8778,
'uni2287' : 8839,
'uni224C' : 8780,
'uni224B' : 8779,
'uni21BD' : 8637,
'uni2286' : 8838,
'uni030F' : 783,
'uni030D' : 781,
'uni030E' : 782,
'uni030B' : 779,
'uni030C' : 780,
'uni030A' : 778,
'uni026E' : 622,
'uni026D' : 621,
'six' : 54,
'uni026A' : 618,
'uni026C' : 620,
'uni25C1' : 9665,
'uni20D6' : 8406,
'uni045B' : 1115,
'uni045C' : 1116,
'uni256B' : 9579,
'uni045A' : 1114,
'uni045F' : 1119,
'uni045E' : 1118,
'A' : 65,
'uni2569' : 9577,
'uni0458' : 1112,
'uni0459' : 1113,
'uni0452' : 1106,
'uni0453' : 1107,
'uni2562' : 9570,
'uni0451' : 1105,
'uni0456' : 1110,
'uni0457' : 1111,
'uni0454' : 1108,
'uni0455' : 1109,
'icircumflex' : 238,
'uni0307' : 775,
'uni0304' : 772,
'uni0305' : 773,
'uni0269' : 617,
'uni0268' : 616,
'uni0300' : 768,
'uni0301' : 769,
'uni0265' : 613,
'uni0264' : 612,
'uni0267' : 615,
'uni0266' : 614,
'uni0261' : 609,
'uni0260' : 608,
'uni0263' : 611,
'uni0262' : 610,
'a' : 97,
'uni2207' : 8711,
'uni2247' : 8775,
'uni2246' : 8774,
'uni2241' : 8769,
'uni2240' : 8768,
'uni2243' : 8771,
'uni2242' : 8770,
'uni2312' : 8978,
'ogonek' : 731,
'uni2249' : 8777,
'uni2248' : 8776,
'uni3030' : 12336,
'q' : 113,
'uni21C2' : 8642,
'uni21C1' : 8641,
'uni21C0' : 8640,
'uni21C7' : 8647,
'uni21C6' : 8646,
'uni21C5' : 8645,
'uni21C4' : 8644,
'uni225F' : 8799,
'uni212C' : 8492,
'uni21C8' : 8648,
'uni2467' : 9319,
'oacute' : 243,
'uni028F' : 655,
'uni028E' : 654,
'uni026F' : 623,
'uni028C' : 652,
'uni028B' : 651,
'uni028A' : 650,
'uni2510' : 9488,
'ograve' : 242,
'edieresis' : 235,
'uni22CE' : 8910,
'uni22CF' : 8911,
'uni219F' : 8607,
'comma' : 44,
'uni22CA' : 8906,
'uni0429' : 1065,
'uni03C6' : 966,
'uni0427' : 1063,
'uni0426' : 1062,
'uni0425' : 1061,
'uni0424' : 1060,
'uni0423' : 1059,
'uni0422' : 1058,
'uni0421' : 1057,
'uni0420' : 1056,
'uni2465' : 9317,
'uni24D0' : 9424,
'uni2464' : 9316,
'uni0430' : 1072,
'otilde' : 245,
'uni2661' : 9825,
'uni24D6' : 9430,
'uni2466' : 9318,
'uni24D5' : 9429,
'uni219A' : 8602,
'uni2518' : 9496,
'uni22B6' : 8886,
'uni2461' : 9313,
'uni24D4' : 9428,
'uni2460' : 9312,
'uni24EA' : 9450,
'guillemotright' : 187,
'ecircumflex' : 234,
'greater' : 62,
'uni2011' : 8209,
'uacute' : 250,
'uni2462' : 9314,
'L' : 76,
'bullet' : 8226,
'uni02A4' : 676,
'uni02A7' : 679,
'cedilla' : 184,
'uni02A2' : 674,
'uni2015' : 8213,
'uni22C4' : 8900,
'uni22C5' : 8901,
'uni22AD' : 8877,
'uni22C7' : 8903,
'uni22C0' : 8896,
'uni2016' : 8214,
'uni22C2' : 8898,
'uni22C3' : 8899,
'uni24CF' : 9423,
'uni042F' : 1071,
'uni042E' : 1070,
'uni042D' : 1069,
'ydieresis' : 255,
'l' : 108,
'logicalnot' : 172,
'uni24CA' : 9418,
'uni0287' : 647,
'uni0286' : 646,
'uni0285' : 645,
'uni0284' : 644,
'uni0283' : 643,
'uni0282' : 642,
'uni0281' : 641,
'uni027C' : 636,
'uni2664' : 9828,
'exclamdown' : 161,
'uni25C4' : 9668,
'uni0289' : 649,
'uni0288' : 648,
'uni039A' : 922,
'endash' : 8211,
'uni2640' : 9792,
'uni20E4' : 8420,
'uni0473' : 1139,
'uni20E1' : 8417,
'uni2642' : 9794,
'uni03B8' : 952,
'uni03B9' : 953,
'agrave' : 224,
'uni03B4' : 948,
'uni03B5' : 949,
'uni03B6' : 950,
'uni03B7' : 951,
'uni03B0' : 944,
'uni03B1' : 945,
'uni03B2' : 946,
'uni03B3' : 947,
'uni2555' : 9557,
'Adieresis' : 196,
'germandbls' : 223,
'Odieresis' : 214,
'space' : 32,
'uni0126' : 294,
'uni0127' : 295,
'uni0124' : 292,
'uni0125' : 293,
'uni0122' : 290,
'uni0123' : 291,
'uni0120' : 288,
'uni0121' : 289,
'quoteright' : 8217,
'uni2560' : 9568,
'uni2556' : 9558,
'ucircumflex' : 251,
'uni2561' : 9569,
'uni2551' : 9553,
'uni25B2' : 9650,
'uni2550' : 9552,
'uni2563' : 9571,
'uni2553' : 9555,
'G' : 71,
'uni2564' : 9572,
'uni2552' : 9554,
'quoteleft' : 8216,
'uni2565' : 9573,
'uni2572' : 9586,
'uni2568' : 9576,
'uni2566' : 9574,
'W' : 87,
'uni214A' : 8522,
'uni012F' : 303,
'uni012D' : 301,
'uni012E' : 302,
'uni012B' : 299,
'uni012C' : 300,
'uni255C' : 9564,
'uni012A' : 298,
'uni2289' : 8841,
'Q' : 81,
'uni2320' : 8992,
'uni2321' : 8993,
'g' : 103,
'uni03BD' : 957,
'uni03BE' : 958,
'uni03BF' : 959,
'uni2282' : 8834,
'uni2285' : 8837,
'uni03BA' : 954,
'uni03BB' : 955,
'uni03BC' : 956,
'uni2128' : 8488,
'uni25B7' : 9655,
'w' : 119,
'uni0302' : 770,
'uni03DE' : 990,
'uni25DA' : 9690,
'uni0303' : 771,
'uni0463' : 1123,
'uni0462' : 1122,
'uni3018' : 12312,
'uni2514' : 9492,
'question' : 63,
'uni25B3' : 9651,
'uni24E1' : 9441,
'one' : 49,
'uni200A' : 8202,
'uni2278' : 8824,
'ring' : 730,
'uni0195' : 405,
'figuredash' : 8210,
'uni22EC' : 8940,
'uni0339' : 825,
'uni0338' : 824,
'uni0337' : 823,
'uni0336' : 822,
'uni0335' : 821,
'uni0333' : 819,
'uni0332' : 818,
'uni0331' : 817,
'uni0330' : 816,
'uni01C1' : 449,
'uni01C0' : 448,
'uni01C3' : 451,
'uni01C2' : 450,
'uni2353' : 9043,
'uni0308' : 776,
'uni2218' : 8728,
'uni2219' : 8729,
'uni2216' : 8726,
'uni2217' : 8727,
'uni2214' : 8724,
'uni0309' : 777,
'uni2609' : 9737,
'uni2213' : 8723,
'uni2210' : 8720,
'uni2211' : 8721,
'uni2245' : 8773,
'B' : 66,
'uni25D6' : 9686,
'iacute' : 237,
'uni02E6' : 742,
'uni02E7' : 743,
'uni02E8' : 744,
'uni02E9' : 745,
'uni221D' : 8733,
'uni221E' : 8734,
'Ydieresis' : 376,
'uni221C' : 8732,
'uni22D7' : 8919,
'uni221A' : 8730,
'R' : 82,
'uni24DC' : 9436,
'uni033F' : 831,
'uni033E' : 830,
'uni033C' : 828,
'uni033B' : 827,
'uni033A' : 826,
'b' : 98,
'uni228A' : 8842,
'uni22DB' : 8923,
'uni2554' : 9556,
'uni046B' : 1131,
'uni046A' : 1130,
'r' : 114,
'uni24DB' : 9435,
'Ccedilla' : 199,
'minus' : 8722,
'uni24DA' : 9434,
'uni03F0' : 1008,
'uni03F1' : 1009,
'uni20AC' : 8364,
'uni2276' : 8822,
'uni24C0' : 9408,
'uni0162' : 354,
'uni0163' : 355,
'uni011E' : 286,
'uni011D' : 285,
'uni011C' : 284,
'uni011B' : 283,
'uni0164' : 356,
'uni0165' : 357,
'Lslash' : 321,
'uni0168' : 360,
'uni0169' : 361,
'uni25C9' : 9673,
'uni02E5' : 741,
'uni21C3' : 8643,
'uni24C4' : 9412,
'uni24E2' : 9442,
'uni2277' : 8823,
'uni013A' : 314,
'uni2102' : 8450,
'Uacute' : 218,
'uni2317' : 8983,
'uni2107' : 8455,
'uni221F' : 8735,
'yacute' : 253,
'uni3012' : 12306,
'Ucircumflex' : 219,
'uni015D' : 349,
'quotedbl' : 34,
'uni25D9' : 9689,
'uni2280' : 8832,
'uni22AF' : 8879,
'onehalf' : 189,
'uni221B' : 8731,
'Thorn' : 222,
'uni2226' : 8742,
'M' : 77,
'uni25BA' : 9658,
'uni2463' : 9315,
'uni2336' : 9014,
'eight' : 56,
'uni2236' : 8758,
'multiply' : 215,
'uni210C' : 8460,
'uni210A' : 8458,
'uni21C9' : 8649,
'grave' : 96,
'uni210E' : 8462,
'uni0117' : 279,
'uni016C' : 364,
'uni0115' : 277,
'uni016A' : 362,
'uni016F' : 367,
'uni0112' : 274,
'uni016D' : 365,
'uni016E' : 366,
'Ocircumflex' : 212,
'uni2305' : 8965,
'm' : 109,
'uni24DF' : 9439,
'uni0119' : 281,
'uni0118' : 280,
'uni20A3' : 8355,
'uni20A4' : 8356,
'uni20A7' : 8359,
'uni2288' : 8840,
'uni24C3' : 9411,
'uni251C' : 9500,
'uni228D' : 8845,
'uni222F' : 8751,
'uni222E' : 8750,
'uni222D' : 8749,
'uni222C' : 8748,
'uni222B' : 8747,
'uni222A' : 8746,
'uni255B' : 9563,
'Ugrave' : 217,
'uni24DE' : 9438,
'guilsinglright' : 8250,
'uni250A' : 9482,
'Ntilde' : 209,
'uni0279' : 633,
'questiondown' : 191,
'uni256C' : 9580,
'Atilde' : 195,
'uni0272' : 626,
'uni0273' : 627,
'uni0270' : 624,
'ccedilla' : 231,
'uni0276' : 630,
'uni0277' : 631,
'uni0274' : 628,
'uni0275' : 629,
'uni2252' : 8786,
'uni041F' : 1055,
'uni2250' : 8784,
'Z' : 90,
'uni2256' : 8790,
'uni2257' : 8791,
'copyright' : 169,
'uni2255' : 8789,
'uni043D' : 1085,
'uni043E' : 1086,
'uni043F' : 1087,
'yen' : 165,
'uni041D' : 1053,
'uni043B' : 1083,
'uni043C' : 1084,
'uni21B0' : 8624,
'uni21B1' : 8625,
'uni21B2' : 8626,
'uni21B3' : 8627,
'uni21B4' : 8628,
'uni21B5' : 8629,
'uni21B6' : 8630,
'uni21B7' : 8631,
'uni21B8' : 8632,
'Eacute' : 201,
'uni2311' : 8977,
'uni2310' : 8976,
'uni228F' : 8847,
'uni25DB' : 9691,
'uni21BA' : 8634,
'uni21BB' : 8635,
'uni21BC' : 8636,
'uni2017' : 8215,
'uni21BE' : 8638,
'uni21BF' : 8639,
'uni231C' : 8988,
'H' : 72,
'uni0293' : 659,
'uni2202' : 8706,
'uni22A4' : 8868,
'uni231E' : 8990,
'uni2232' : 8754,
'uni225B' : 8795,
'uni225C' : 8796,
'uni24D9' : 9433,
'uni225A' : 8794,
'uni0438' : 1080,
'uni0439' : 1081,
'uni225D' : 8797,
'uni225E' : 8798,
'uni0434' : 1076,
'X' : 88,
'uni007F' : 127,
'uni0437' : 1079,
'Idieresis' : 207,
'uni0431' : 1073,
'uni0432' : 1074,
'uni0433' : 1075,
'uni22AC' : 8876,
'uni22CD' : 8909,
'uni25A3' : 9635,
'bar' : 124,
'uni24BB' : 9403,
'uni037E' : 894,
'uni027B' : 635,
'h' : 104,
'uni027A' : 634,
'uni027F' : 639,
'uni027D' : 637,
'uni027E' : 638,
'uni2227' : 8743,
'uni2004' : 8196,
'uni2225' : 8741,
'uni2224' : 8740,
'uni2223' : 8739,
'uni2222' : 8738,
'uni2221' : 8737,
'uni2220' : 8736,
'x' : 120,
'uni2323' : 8995,
'uni2559' : 9561,
'uni2558' : 9560,
'uni2229' : 8745,
'uni2228' : 8744,
'udieresis' : 252,
'uni029D' : 669,
'ordfeminine' : 170,
'uni22CB' : 8907,
'uni233D' : 9021,
'uni0428' : 1064,
'uni24C6' : 9414,
'uni22DD' : 8925,
'uni24C7' : 9415,
'uni015C' : 348,
'uni015B' : 347,
'uni015A' : 346,
'uni22AA' : 8874,
'uni015F' : 351,
'uni015E' : 350,
'braceleft' : 123,
'uni24C5' : 9413,
'uni0410' : 1040,
'uni03AA' : 938,
'uni24C2' : 9410,
'uni03AC' : 940,
'uni03AB' : 939,
'macron' : 175,
'uni03AD' : 941,
'uni03AF' : 943,
'uni0294' : 660,
'uni0295' : 661,
'uni0296' : 662,
'uni0297' : 663,
'uni0290' : 656,
'uni0291' : 657,
'uni0292' : 658,
'atilde' : 227,
'Acircumflex' : 194,
'uni2370' : 9072,
'uni24C1' : 9409,
'uni0298' : 664,
'uni0299' : 665,
'Oslash' : 216,
'uni029E' : 670,
'C' : 67,
'quotedblleft' : 8220,
'uni029B' : 667,
'uni029C' : 668,
'uni03A9' : 937,
'uni03A8' : 936,
'S' : 83,
'uni24C9' : 9417,
'uni03A1' : 929,
'uni03A0' : 928,
'exclam' : 33,
'uni03A5' : 933,
'uni03A4' : 932,
'uni03A7' : 935,
'Zcaron' : 381,
'uni2133' : 8499,
'uni2132' : 8498,
'uni0159' : 345,
'uni0158' : 344,
'uni2137' : 8503,
'uni2005' : 8197,
'uni2135' : 8501,
'uni2134' : 8500,
'uni02BA' : 698,
'uni2033' : 8243,
'uni0151' : 337,
'uni0150' : 336,
'uni0157' : 343,
'equal' : 61,
'uni0155' : 341,
'uni0154' : 340,
's' : 115,
'uni233F' : 9023,
'eth' : 240,
'uni24BE' : 9406,
'uni21E9' : 8681,
'uni2060' : 8288,
'Egrave' : 200,
'uni255D' : 9565,
'uni24CD' : 9421,
'uni21E1' : 8673,
'uni21B9' : 8633,
'hyphen' : 45,
'uni01BE' : 446,
'uni01BB' : 443,
'period' : 46,
'igrave' : 236,
'uni01BA' : 442,
'uni2296' : 8854,
'uni2297' : 8855,
'uni2294' : 8852,
'uni2295' : 8853,
'colon' : 58,
'uni2293' : 8851,
'uni2290' : 8848,
'uni2291' : 8849,
'uni032D' : 813,
'uni032E' : 814,
'uni032F' : 815,
'uni032A' : 810,
'uni032B' : 811,
'uni032C' : 812,
'uni231D' : 8989,
'Ecircumflex' : 202,
'uni24D7' : 9431,
'uni25DD' : 9693,
'trademark' : 8482,
'Aacute' : 193,
'cent' : 162,
'uni0445' : 1093,
'uni266E' : 9838,
'uni266D' : 9837,
'uni266B' : 9835,
'uni03C9' : 969,
'uni2003' : 8195,
'uni2047' : 8263,
'lslash' : 322,
'uni03A6' : 934,
'uni2043' : 8259,
'uni250C' : 9484,
'uni2040' : 8256,
'uni255F' : 9567,
'uni24CB' : 9419,
'uni0472' : 1138,
'uni0446' : 1094,
'uni0474' : 1140,
'uni0475' : 1141,
'uni2508' : 9480,
'uni2660' : 9824,
'uni2506' : 9478,
'uni2502' : 9474,
'c' : 99,
'uni2500' : 9472,
'N' : 78,
'uni22A6' : 8870,
'uni21E7' : 8679,
'uni2130' : 8496,
'uni2002' : 8194,
'breve' : 728,
'uni0442' : 1090,
'Oacute' : 211,
'uni229F' : 8863,
'uni25C7' : 9671,
'uni229D' : 8861,
'uni229E' : 8862,
'guillemotleft' : 171,
'uni0329' : 809,
'uni24E5' : 9445,
'uni011F' : 287,
'uni0324' : 804,
'uni0325' : 805,
'uni0326' : 806,
'uni0327' : 807,
'uni0321' : 801,
'uni0322' : 802,
'n' : 110,
'uni2032' : 8242,
'uni2269' : 8809,
'uni2268' : 8808,
'uni0306' : 774,
'uni226B' : 8811,
'uni21EA' : 8682,
'uni0166' : 358,
'uni203B' : 8251,
'uni01B5' : 437,
'idieresis' : 239,
'uni02BC' : 700,
'uni01B0' : 432,
'braceright' : 125,
'seven' : 55,
'uni02BB' : 699,
'uni011A' : 282,
'uni29FB' : 10747,
'brokenbar' : 166,
'uni2036' : 8246,
'uni25C0' : 9664,
'uni0156' : 342,
'uni22D5' : 8917,
'uni0258' : 600,
'ugrave' : 249,
'uni22D6' : 8918,
'uni22D1' : 8913,
'uni2034' : 8244,
'uni22D3' : 8915,
'uni22D2' : 8914,
'uni203C' : 8252,
'uni223E' : 8766,
'uni02BF' : 703,
'uni22D9' : 8921,
'uni22D8' : 8920,
'uni25BD' : 9661,
'uni25BE' : 9662,
'uni25BF' : 9663,
'uni041B' : 1051,
'periodcentered' : 183,
'uni25BC' : 9660,
'uni019E' : 414,
'uni019B' : 411,
'uni019A' : 410,
'uni2007' : 8199,
'uni0391' : 913,
'uni0390' : 912,
'uni0393' : 915,
'uni0392' : 914,
'uni0395' : 917,
'uni0394' : 916,
'uni0397' : 919,
'uni0396' : 918,
'uni0399' : 921,
'uni0398' : 920,
'uni25C8' : 9672,
'uni2468' : 9320,
'sterling' : 163,
'uni22EB' : 8939,
'uni039C' : 924,
'uni039B' : 923,
'uni039E' : 926,
'uni039D' : 925,
'uni039F' : 927,
'I' : 73,
'uni03E1' : 993,
'uni03E0' : 992,
'uni2319' : 8985,
'uni228B' : 8843,
'uni25B5' : 9653,
'uni25B6' : 9654,
'uni22EA' : 8938,
'uni24B9' : 9401,
'uni044E' : 1102,
'uni0199' : 409,
'uni2266' : 8806,
'Y' : 89,
'uni22A2' : 8866,
'Eth' : 208,
'uni266F' : 9839,
'emdash' : 8212,
'uni263B' : 9787,
'uni24BD' : 9405,
'uni22DE' : 8926,
'uni0360' : 864,
'uni2557' : 9559,
'uni22DF' : 8927,
'uni22DA' : 8922,
'uni22DC' : 8924,
'uni0361' : 865,
'i' : 105,
'uni24BF' : 9407,
'uni0362' : 866,
'uni263E' : 9790,
'uni028D' : 653,
'uni2259' : 8793,
'uni0323' : 803,
'uni2265' : 8805,
'daggerdbl' : 8225,
'y' : 121,
'uni010A' : 266,
'plusminus' : 177,
'less' : 60,
'uni21AE' : 8622,
'uni0315' : 789,
'uni230B' : 8971,
'uni21AF' : 8623,
'uni21AA' : 8618,
'uni21AC' : 8620,
'uni21AB' : 8619,
'uni01FB' : 507,
'uni01FC' : 508,
'uni223A' : 8762,
'uni01FA' : 506,
'uni01FF' : 511,
'uni01FD' : 509,
'uni01FE' : 510,
'uni2567' : 9575,
'uni25E0' : 9696,
'uni0104' : 260,
'uni0105' : 261,
'uni0106' : 262,
'uni0107' : 263,
'uni0100' : 256,
'uni0101' : 257,
'uni0102' : 258,
'uni0103' : 259,
'uni2038' : 8248,
'uni2009' : 8201,
'uni2008' : 8200,
'uni0108' : 264,
'uni0109' : 265,
'uni02A1' : 673,
'uni223B' : 8763,
'uni226C' : 8812,
'uni25AC' : 9644,
'uni24D3' : 9427,
'uni21E0' : 8672,
'uni21E3' : 8675,
'Udieresis' : 220,
'uni21E2' : 8674,
'D' : 68,
'uni21E5' : 8677,
'uni2621' : 9761,
'uni21D1' : 8657,
'uni203E' : 8254,
'uni22C6' : 8902,
'uni21E4' : 8676,
'uni010D' : 269,
'uni010E' : 270,
'uni010F' : 271,
'five' : 53,
'T' : 84,
'uni010B' : 267,
'uni010C' : 268,
'uni2605' : 9733,
'uni2663' : 9827,
'uni21E6' : 8678,
'uni24B6' : 9398,
'uni22C1' : 8897,
'oslash' : 248,
'acute' : 180,
'uni01F0' : 496,
'd' : 100,
'OE' : 338,
'uni22E3' : 8931,
'Igrave' : 204,
'uni2308' : 8968,
'uni2309' : 8969,
'uni21A9' : 8617,
't' : 116,
'uni2313' : 8979,
'uni03A3' : 931,
'uni21A4' : 8612,
'uni21A7' : 8615,
'uni21A6' : 8614,
'uni21A1' : 8609,
'uni21A0' : 8608,
'uni21A3' : 8611,
'uni21A2' : 8610,
'parenright' : 41,
'uni256A' : 9578,
'uni25DC' : 9692,
'uni24CE' : 9422,
'uni042C' : 1068,
'uni24E0' : 9440,
'uni042B' : 1067,
'uni0409' : 1033,
'uni0408' : 1032,
'uni24E7' : 9447,
'uni25B4' : 9652,
'uni042A' : 1066,
'uni228E' : 8846,
'uni0401' : 1025,
'adieresis' : 228,
'uni0403' : 1027,
'quotesingle' : 39,
'uni0405' : 1029,
'uni0404' : 1028,
'uni0407' : 1031,
'uni0406' : 1030,
'uni229C' : 8860,
'uni2306' : 8966,
'uni2253' : 8787,
'twodotenleader' : 8229,
'uni2131' : 8497,
'uni21DA' : 8666,
'uni2234' : 8756,
'uni2235' : 8757,
'uni01A5' : 421,
'uni2237' : 8759,
'uni2230' : 8752,
'uni02CC' : 716,
'slash' : 47,
'uni01A0' : 416,
'ellipsis' : 8230,
'uni2299' : 8857,
'uni2238' : 8760,
'numbersign' : 35,
'uni21A8' : 8616,
'uni223D' : 8765,
'uni01AF' : 431,
'uni223F' : 8767,
'uni01AD' : 429,
'uni01AB' : 427,
'odieresis' : 246,
'uni223C' : 8764,
'uni227D' : 8829,
'uni0280' : 640,
'O' : 79,
'uni227E' : 8830,
'uni21A5' : 8613,
'uni22D4' : 8916,
'uni25D4' : 9684,
'uni227F' : 8831,
'uni0435' : 1077,
'uni2302' : 8962,
'uni2669' : 9833,
'uni24E3' : 9443,
'uni2720' : 10016,
'uni22A8' : 8872,
'uni22A9' : 8873,
'uni040A' : 1034,
'uni22A7' : 8871,
'oe' : 339,
'uni040B' : 1035,
'uni040E' : 1038,
'uni22A3' : 8867,
'o' : 111,
'uni040F' : 1039,
'Edieresis' : 203,
'uni25D5' : 9685,
'plus' : 43,
'uni044D' : 1101,
'uni263C' : 9788,
'uni22E6' : 8934,
'uni2283' : 8835,
'uni258C' : 9612,
'uni219E' : 8606,
'uni24E4' : 9444,
'uni2136' : 8502,
'dagger' : 8224,
'uni24B7' : 9399,
'uni219B' : 8603,
'uni22E5' : 8933,
'three' : 51,
'uni210B' : 8459,
'uni2534' : 9524,
'uni24B8' : 9400,
'uni230A' : 8970,
'hungarumlaut' : 733,
'parenleft' : 40,
'uni0148' : 328,
'uni0149' : 329,
'uni2124' : 8484,
'uni2125' : 8485,
'uni2126' : 8486,
'uni2127' : 8487,
'uni0140' : 320,
'uni2129' : 8489,
'uni25C5' : 9669,
'uni0143' : 323,
'uni0144' : 324,
'uni0145' : 325,
'uni0146' : 326,
'uni0147' : 327,
'uni210D' : 8461,
'fraction' : 8260,
'uni2031' : 8241,
'uni2196' : 8598,
'uni2035' : 8245,
'uni24E6' : 9446,
'uni016B' : 363,
'uni24BA' : 9402,
'uni266A' : 9834,
'uni0116' : 278,
'uni2115' : 8469,
'registered' : 174,
'J' : 74,
'uni25DF' : 9695,
'uni25CE' : 9678,
'uni273D' : 10045,
'dieresis' : 168,
'uni212B' : 8491,
'uni0114' : 276,
'uni212D' : 8493,
'uni212E' : 8494,
'uni212F' : 8495,
'uni014A' : 330,
'uni014B' : 331,
'uni014C' : 332,
'uni014D' : 333,
'uni014E' : 334,
'uni014F' : 335,
'uni025E' : 606,
'uni24E8' : 9448,
'uni0111' : 273,
'uni24E9' : 9449,
'Ograve' : 210,
'j' : 106,
'uni2195' : 8597,
'uni2194' : 8596,
'uni2197' : 8599,
'uni2037' : 8247,
'uni2191' : 8593,
'uni2190' : 8592,
'uni2193' : 8595,
'uni2192' : 8594,
'uni29FA' : 10746,
'uni2713' : 10003,
'z' : 122,
'uni2199' : 8601,
'uni2198' : 8600,
'uni2667' : 9831,
'ae' : 230,
'uni0448' : 1096,
'semicolon' : 59,
'uni2666' : 9830,
'uni038F' : 911,
'uni0444' : 1092,
'uni0447' : 1095,
'uni038E' : 910,
'uni0441' : 1089,
'uni038C' : 908,
'uni0443' : 1091,
'uni038A' : 906,
'uni0250' : 592,
'uni0251' : 593,
'uni0252' : 594,
'uni0253' : 595,
'uni0254' : 596,
'at' : 64,
'uni0256' : 598,
'uni0257' : 599,
'uni0167' : 359,
'uni0259' : 601,
'uni228C' : 8844,
'uni2662' : 9826,
'uni0319' : 793,
'uni0318' : 792,
'uni24BC' : 9404,
'uni0402' : 1026,
'uni22EF' : 8943,
'Iacute' : 205,
'uni22ED' : 8941,
'uni22EE' : 8942,
'uni0311' : 785,
'uni0310' : 784,
'uni21E8' : 8680,
'uni0312' : 786,
'percent' : 37,
'uni0317' : 791,
'uni0316' : 790,
'uni21D6' : 8662,
'uni21D7' : 8663,
'uni21D4' : 8660,
'uni21D5' : 8661,
'uni21D2' : 8658,
'uni21D3' : 8659,
'uni21D0' : 8656,
'uni2138' : 8504,
'uni2270' : 8816,
'uni2271' : 8817,
'uni2272' : 8818,
'uni2273' : 8819,
'uni2274' : 8820,
'uni2275' : 8821,
'bracketright' : 93,
'uni21D9' : 8665,
'uni21DF' : 8671,
'uni21DD' : 8669,
'uni21DE' : 8670,
'AE' : 198,
'uni03AE' : 942,
'uni227A' : 8826,
'uni227B' : 8827,
'uni227C' : 8828,
'asterisk' : 42,
'aacute' : 225,
'uni226F' : 8815,
'uni22E2' : 8930,
'uni0386' : 902,
'uni22E0' : 8928,
'uni22E1' : 8929,
'U' : 85,
'uni22E7' : 8935,
'uni22E4' : 8932,
'uni0387' : 903,
'uni031A' : 794,
'eacute' : 233,
'uni22E8' : 8936,
'uni22E9' : 8937,
'uni24D8' : 9432,
'uni025A' : 602,
'uni025B' : 603,
'uni025C' : 604,
'e' : 101,
'uni0128' : 296,
'uni025F' : 607,
'uni2665' : 9829,
'thorn' : 254,
'uni0129' : 297,
'uni253C' : 9532,
'uni25D7' : 9687,
'u' : 117,
'uni0388' : 904,
'uni0389' : 905,
'uni0255' : 597,
'uni0171' : 369,
'uni0384' : 900,
'uni0385' : 901,
'uni044A' : 1098,
'uni252C' : 9516,
'uni044C' : 1100,
'uni044B' : 1099
}
# Inverse of ``type12uni``: map a Unicode code point back to its Type-1 glyph
# name.  Several Type-1 names can share one code point; whichever is seen last
# during iteration wins (dict-iteration-order dependent, as in the original
# ``dict(generator)`` form -- this rewrite to a dict comprehension preserves
# that behaviour while using the idiomatic construct).
uni2type1 = {v: k for k, v in six.iteritems(type12uni)}
# Mapping from a TeX math-mode command name (written without the leading
# backslash, e.g. 'alpha' for \alpha) to the Unicode code point of the glyph
# it produces.  Values are kept exactly as historically accumulated, some as
# hex literals and some as decimal integers.
#
# NOTE(review): the original literal listed 'fallingdotseq' and 'vdots' twice
# with identical values (0x2252 == 8786 and 0x22ee == 8942); the redundant
# later entries have been removed -- no behaviour change, since the duplicate
# merely overwrote the earlier key with the same value.
# NOTE(review): 'leftbrace' maps to 124 ('|') while 'lbrace' and '{' map to
# 123 ('{') -- this looks suspicious but is left untouched pending
# confirmation against the rendering code that consumes it.
tex2uni = {
    'widehat': 0x0302,
    'widetilde': 0x0303,
    'widebar': 0x0305,
    'langle': 0x27e8,
    'rangle': 0x27e9,
    'perp': 0x27c2,
    'neq': 0x2260,
    'Join': 0x2a1d,
    'leqslant': 0x2a7d,
    'geqslant': 0x2a7e,
    'lessapprox': 0x2a85,
    'gtrapprox': 0x2a86,
    'lesseqqgtr': 0x2a8b,
    'gtreqqless': 0x2a8c,
    'triangleeq': 0x225c,
    'eqslantless': 0x2a95,
    'eqslantgtr': 0x2a96,
    'backepsilon': 0x03f6,
    'precapprox': 0x2ab7,
    'succapprox': 0x2ab8,
    'fallingdotseq': 0x2252,
    'subseteqq': 0x2ac5,
    'supseteqq': 0x2ac6,
    'varpropto': 0x221d,
    'precnapprox': 0x2ab9,
    'succnapprox': 0x2aba,
    'subsetneqq': 0x2acb,
    'supsetneqq': 0x2acc,
    'lnapprox': 0x2ab9,
    'gnapprox': 0x2aba,
    'longleftarrow': 0x27f5,
    'longrightarrow': 0x27f6,
    'longleftrightarrow': 0x27f7,
    'Longleftarrow': 0x27f8,
    'Longrightarrow': 0x27f9,
    'Longleftrightarrow': 0x27fa,
    'longmapsto': 0x27fc,
    'leadsto': 0x21dd,
    'dashleftarrow': 0x290e,
    'dashrightarrow': 0x290f,
    'circlearrowleft': 0x21ba,
    'circlearrowright': 0x21bb,
    'leftrightsquigarrow': 0x21ad,
    'leftsquigarrow': 0x219c,
    'rightsquigarrow': 0x219d,
    'Game': 0x2141,
    'hbar': 0x0127,
    'hslash': 0x210f,
    'ldots': 0x2026,
    'vdots': 0x22ee,
    'doteqdot': 0x2251,
    'doteq': 8784,
    'partial': 8706,
    'gg': 8811,
    'asymp': 8781,
    'blacktriangledown': 9662,
    'otimes': 8855,
    'nearrow': 8599,
    'varpi': 982,
    'vee': 8744,
    'vec': 8407,
    'smile': 8995,
    'succnsim': 8937,
    'gimel': 8503,
    'vert': 124,
    '|': 124,
    'varrho': 1009,
    'P': 182,
    'approxident': 8779,
    'Swarrow': 8665,
    'textasciicircum': 94,
    'imageof': 8887,
    'ntriangleleft': 8938,
    'nleq': 8816,
    'div': 247,
    'nparallel': 8742,
    'Leftarrow': 8656,
    'lll': 8920,
    'oiint': 8751,
    'ngeq': 8817,
    'Theta': 920,
    'origof': 8886,
    'blacksquare': 9632,
    'solbar': 9023,
    'neg': 172,
    'sum': 8721,
    'Vdash': 8873,
    'coloneq': 8788,
    'degree': 176,
    'bowtie': 8904,
    'blacktriangleright': 9654,
    'varsigma': 962,
    'leq': 8804,
    'ggg': 8921,
    'lneqq': 8808,
    'scurel': 8881,
    'stareq': 8795,
    'BbbN': 8469,
    'nLeftarrow': 8653,
    'nLeftrightarrow': 8654,
    'k': 808,
    'bot': 8869,
    'BbbC': 8450,
    'Lsh': 8624,
    'leftleftarrows': 8647,
    'BbbZ': 8484,
    'digamma': 989,
    'BbbR': 8477,
    'BbbP': 8473,
    'BbbQ': 8474,
    'vartriangleright': 8883,
    'succsim': 8831,
    'wedge': 8743,
    'lessgtr': 8822,
    'veebar': 8891,
    'mapsdown': 8615,
    'Rsh': 8625,
    'chi': 967,
    'prec': 8826,
    'nsubseteq': 8840,
    'therefore': 8756,
    'eqcirc': 8790,
    'textexclamdown': 161,
    'nRightarrow': 8655,
    'flat': 9837,
    'notin': 8713,
    'llcorner': 8990,
    'varepsilon': 949,
    'bigtriangleup': 9651,
    'aleph': 8501,
    'dotminus': 8760,
    'upsilon': 965,
    'Lambda': 923,
    'cap': 8745,
    'barleftarrow': 8676,
    'mu': 956,
    'boxplus': 8862,
    'mp': 8723,
    'circledast': 8859,
    'tau': 964,
    'in': 8712,
    'backslash': 92,
    'varnothing': 8709,
    'sharp': 9839,
    'eqsim': 8770,
    'gnsim': 8935,
    'Searrow': 8664,
    'updownarrows': 8645,
    'heartsuit': 9825,
    'trianglelefteq': 8884,
    'ddag': 8225,
    'sqsubseteq': 8849,
    'mapsfrom': 8612,
    'boxbar': 9707,
    'sim': 8764,
    'Nwarrow': 8662,
    'nequiv': 8802,
    'succ': 8827,
    'vdash': 8866,
    'Leftrightarrow': 8660,
    'parallel': 8741,
    'invnot': 8976,
    'natural': 9838,
    'ss': 223,
    'uparrow': 8593,
    'nsim': 8769,
    'hookrightarrow': 8618,
    'Equiv': 8803,
    'approx': 8776,
    'Vvdash': 8874,
    'nsucc': 8833,
    'leftrightharpoons': 8651,
    'Re': 8476,
    'boxminus': 8863,
    'equiv': 8801,
    'Lleftarrow': 8666,
    'thinspace': 8201,
    'll': 8810,
    'Cup': 8915,
    'measeq': 8798,
    'upharpoonleft': 8639,
    'lq': 8216,
    'Upsilon': 933,
    'subsetneq': 8842,
    'greater': 62,
    'supsetneq': 8843,
    'Cap': 8914,
    'L': 321,
    'spadesuit': 9824,
    'lrcorner': 8991,
    'not': 824,
    'bar': 772,
    'rightharpoonaccent': 8401,
    'boxdot': 8865,
    'l': 322,
    'leftharpoondown': 8637,
    'bigcup': 8899,
    'iint': 8748,
    'bigwedge': 8896,
    'downharpoonleft': 8643,
    'textasciitilde': 126,
    'subset': 8834,
    'leqq': 8806,
    'mapsup': 8613,
    'nvDash': 8877,
    'looparrowleft': 8619,
    'nless': 8814,
    'rightarrowbar': 8677,
    'Vert': 8214,
    'downdownarrows': 8650,
    'uplus': 8846,
    'simeq': 8771,
    'napprox': 8777,
    'ast': 8727,
    'twoheaduparrow': 8607,
    'doublebarwedge': 8966,
    'Sigma': 931,
    'leftharpoonaccent': 8400,
    'ntrianglelefteq': 8940,
    'nexists': 8708,
    'times': 215,
    'measuredangle': 8737,
    'bumpeq': 8783,
    'carriagereturn': 8629,
    'adots': 8944,
    'checkmark': 10003,
    'lambda': 955,
    'xi': 958,
    'rbrace': 125,
    'rbrack': 93,
    'Nearrow': 8663,
    'maltese': 10016,
    'clubsuit': 9827,
    'top': 8868,
    'overarc': 785,
    'varphi': 966,
    'Delta': 916,
    'iota': 953,
    'nleftarrow': 8602,
    'candra': 784,
    'supset': 8835,
    'triangleleft': 9665,
    'gtreqless': 8923,
    'ntrianglerighteq': 8941,
    'quad': 8195,
    'Xi': 926,
    'gtrdot': 8919,
    'leftthreetimes': 8907,
    'minus': 8722,
    'preccurlyeq': 8828,
    'nleftrightarrow': 8622,
    'lambdabar': 411,
    'blacktriangle': 9652,
    'kernelcontraction': 8763,
    'Phi': 934,
    'angle': 8736,
    'spadesuitopen': 9828,
    'eqless': 8924,
    'mid': 8739,
    'varkappa': 1008,
    'Ldsh': 8626,
    'updownarrow': 8597,
    'beta': 946,
    'textquotedblleft': 8220,
    'rho': 961,
    'alpha': 945,
    'intercal': 8890,
    'beth': 8502,
    'grave': 768,
    'acwopencirclearrow': 8634,
    'nmid': 8740,
    'nsupset': 8837,
    'sigma': 963,
    'dot': 775,
    'Rightarrow': 8658,
    'turnednot': 8985,
    'backsimeq': 8909,
    'leftarrowtail': 8610,
    'approxeq': 8778,
    'curlyeqsucc': 8927,
    'rightarrowtail': 8611,
    'Psi': 936,
    'copyright': 169,
    'yen': 165,
    'vartriangleleft': 8882,
    'rasp': 700,
    'triangleright': 9655,
    'precsim': 8830,
    'infty': 8734,
    'geq': 8805,
    'updownarrowbar': 8616,
    'precnsim': 8936,
    'H': 779,
    'ulcorner': 8988,
    'looparrowright': 8620,
    'ncong': 8775,
    'downarrow': 8595,
    'circeq': 8791,
    'subseteq': 8838,
    'bigstar': 9733,
    'prime': 8242,
    'lceil': 8968,
    'Rrightarrow': 8667,
    'oiiint': 8752,
    'curlywedge': 8911,
    'vDash': 8872,
    'lfloor': 8970,
    'ddots': 8945,
    'exists': 8707,
    'underbar': 817,
    'Pi': 928,
    'leftrightarrows': 8646,
    'sphericalangle': 8738,
    'coprod': 8720,
    'circledcirc': 8858,
    'gtrsim': 8819,
    'gneqq': 8809,
    'between': 8812,
    'theta': 952,
    'complement': 8705,
    'arceq': 8792,
    'nVdash': 8878,
    'S': 167,
    'wr': 8768,
    'wp': 8472,
    'backcong': 8780,
    'lasp': 701,
    'c': 807,
    'nabla': 8711,
    'dotplus': 8724,
    'eta': 951,
    'forall': 8704,
    'eth': 240,
    'colon': 58,
    'sqcup': 8852,
    'rightrightarrows': 8649,
    'sqsupset': 8848,
    'mapsto': 8614,
    'bigtriangledown': 9661,
    'sqsupseteq': 8850,
    'propto': 8733,
    'pi': 960,
    'pm': 177,
    'dots': 0x2026,
    'nrightarrow': 8603,
    'textasciiacute': 180,
    'Doteq': 8785,
    'breve': 774,
    'sqcap': 8851,
    'twoheadrightarrow': 8608,
    'kappa': 954,
    'vartriangle': 9653,
    'diamondsuit': 9826,
    'pitchfork': 8916,
    'blacktriangleleft': 9664,
    'nprec': 8832,
    'curvearrowright': 8631,
    'barwedge': 8892,
    'multimap': 8888,
    'textquestiondown': 191,
    'cong': 8773,
    'rtimes': 8906,
    'rightzigzagarrow': 8669,
    'rightarrow': 8594,
    'leftarrow': 8592,
    '__sqrt__': 8730,
    'twoheaddownarrow': 8609,
    'oint': 8750,
    'bigvee': 8897,
    'eqdef': 8797,
    'sterling': 163,
    'phi': 981,
    'Updownarrow': 8661,
    'backprime': 8245,
    'emdash': 8212,
    'Gamma': 915,
    'i': 305,
    'rceil': 8969,
    'leftharpoonup': 8636,
    'Im': 8465,
    'curvearrowleft': 8630,
    'wedgeq': 8793,
    'curlyeqprec': 8926,
    'questeq': 8799,
    'less': 60,
    'upuparrows': 8648,
    'tilde': 771,
    'textasciigrave': 96,
    'smallsetminus': 8726,
    'ell': 8467,
    'cup': 8746,
    'danger': 9761,
    'nVDash': 8879,
    'cdotp': 183,
    'cdots': 8943,
    'hat': 770,
    'eqgtr': 8925,
    'enspace': 8194,
    'psi': 968,
    'frown': 8994,
    'acute': 769,
    'downzigzagarrow': 8623,
    'ntriangleright': 8939,
    'cupdot': 8845,
    'circleddash': 8861,
    'oslash': 8856,
    'mho': 8487,
    'd': 803,
    'sqsubset': 8847,
    'cdot': 8901,
    'Omega': 937,
    'OE': 338,
    'veeeq': 8794,
    'Finv': 8498,
    't': 865,
    'leftrightarrow': 8596,
    'swarrow': 8601,
    'rightthreetimes': 8908,
    'rightleftharpoons': 8652,
    'lesssim': 8818,
    'searrow': 8600,
    'because': 8757,
    'gtrless': 8823,
    'star': 8902,
    'nsubset': 8836,
    'zeta': 950,
    'dddot': 8411,
    'bigcirc': 9675,
    'Supset': 8913,
    'circ': 8728,
    'slash': 8725,
    'ocirc': 778,
    'prod': 8719,
    'twoheadleftarrow': 8606,
    'daleth': 8504,
    'upharpoonright': 8638,
    'odot': 8857,
    'Uparrow': 8657,
    'O': 216,
    'hookleftarrow': 8617,
    'trianglerighteq': 8885,
    'nsime': 8772,
    'oe': 339,
    'nwarrow': 8598,
    'o': 248,
    'ddddot': 8412,
    'downharpoonright': 8642,
    'succcurlyeq': 8829,
    'gamma': 947,
    'scrR': 8475,
    'dag': 8224,
    'thickspace': 8197,
    'frakZ': 8488,
    'lessdot': 8918,
    'triangledown': 9663,
    'ltimes': 8905,
    'scrB': 8492,
    'endash': 8211,
    'scrE': 8496,
    'scrF': 8497,
    'scrH': 8459,
    'scrI': 8464,
    'rightharpoondown': 8641,
    'scrL': 8466,
    'scrM': 8499,
    'frakC': 8493,
    'nsupseteq': 8841,
    'circledR': 174,
    'circledS': 9416,
    'ngtr': 8815,
    'bigcap': 8898,
    'scre': 8495,
    'Downarrow': 8659,
    'scrg': 8458,
    'overleftrightarrow': 8417,
    'scro': 8500,
    'lnsim': 8934,
    'eqcolon': 8789,
    'curlyvee': 8910,
    'urcorner': 8989,
    'lbrace': 123,
    'Bumpeq': 8782,
    'delta': 948,
    'boxtimes': 8864,
    'overleftarrow': 8406,
    'prurel': 8880,
    'clubsuitopen': 9831,
    'cwopencirclearrow': 8635,
    'geqq': 8807,
    'rightleftarrows': 8644,
    'ac': 8766,
    'ae': 230,
    'int': 8747,
    'rfloor': 8971,
    'risingdotseq': 8787,
    'nvdash': 8876,
    'diamond': 8900,
    'ddot': 776,
    'backsim': 8765,
    'oplus': 8853,
    'triangleq': 8796,
    'check': 780,
    'ni': 8715,
    'iiint': 8749,
    'ne': 8800,
    'lesseqgtr': 8922,
    'obar': 9021,
    'supseteq': 8839,
    'nu': 957,
    'AA': 8491,
    'AE': 198,
    'models': 8871,
    'ominus': 8854,
    'dashv': 8867,
    'omega': 969,
    'rq': 8217,
    'Subset': 8912,
    'rightharpoonup': 8640,
    'Rdsh': 8627,
    'bullet': 8729,
    'divideontimes': 8903,
    'lbrack': 91,
    'textquotedblright': 8221,
    'Colon': 8759,
    '%': 37,
    '$': 36,
    '{': 123,
    '}': 125,
    '_': 95,
    '#': 35,
    'imath': 0x131,
    'circumflexaccent': 770,
    'combiningbreve': 774,
    'combiningoverline': 772,
    'combininggraveaccent': 768,
    'combiningacuteaccent': 769,
    'combiningdiaeresis': 776,
    'combiningtilde': 771,
    'combiningrightarrowabove': 8407,
    'combiningdotabove': 775,
    'to': 8594,
    'succeq': 8829,
    'emptyset': 8709,
    'leftparen': 40,
    'rightparen': 41,
    'bigoplus': 10753,
    'leftangle': 10216,
    'rightangle': 10217,
    'leftbrace': 124,
    'rightbrace': 125,
    'jmath': 567,
    'bigodot': 10752,
    'preceq': 8828,
    'biguplus': 10756,
    'epsilon': 949,
    'vartheta': 977,
    'bigotimes': 10754,
    'guillemotleft': 171,
    'ring': 730,
    'Thorn': 222,
    'guilsinglright': 8250,
    'perthousand': 8240,
    'macron': 175,
    'cent': 162,
    'guillemotright': 187,
    'equal': 61,
    'asterisk': 42,
    'guilsinglleft': 8249,
    'plus': 43,
    'thorn': 254,
    'dagger': 8224
}
# Mapping of virtual "math alphabet" fonts (blackboard, calligraphic,
# fraktur, script, sans-serif, typewriter, circled) onto the real STIX
# glyph ranges.  Each element is a 4-tuple of the form:
#   src_start, src_end, dst_font, dst_start
# meaning: source codepoints [src_start, src_end] (inclusive) map onto
# the destination font starting at codepoint dst_start.
#
stix_virtual_fonts = {
    'bb':
        {
        'rm':
            [
            (0x0030, 0x0039, 'rm', 0x1d7d8),  # 0-9
            (0x0041, 0x0042, 'rm', 0x1d538),  # A-B
            (0x0043, 0x0043, 'rm', 0x2102),   # C
            (0x0044, 0x0047, 'rm', 0x1d53b),  # D-G
            (0x0048, 0x0048, 'rm', 0x210d),   # H
            (0x0049, 0x004d, 'rm', 0x1d540),  # I-M
            (0x004e, 0x004e, 'rm', 0x2115),   # N
            (0x004f, 0x004f, 'rm', 0x1d546),  # O
            (0x0050, 0x0051, 'rm', 0x2119),   # P-Q
            (0x0052, 0x0052, 'rm', 0x211d),   # R
            (0x0053, 0x0059, 'rm', 0x1d54a),  # S-Y
            (0x005a, 0x005a, 'rm', 0x2124),   # Z
            (0x0061, 0x007a, 'rm', 0x1d552),  # a-z
            (0x0393, 0x0393, 'rm', 0x213e),   # \Gamma
            (0x03a0, 0x03a0, 'rm', 0x213f),   # \Pi
            (0x03a3, 0x03a3, 'rm', 0x2140),   # \Sigma
            (0x03b3, 0x03b3, 'rm', 0x213d),   # \gamma
            (0x03c0, 0x03c0, 'rm', 0x213c),   # \pi
            ],
        'it':
            [
            (0x0030, 0x0039, 'rm', 0x1d7d8),  # 0-9
            (0x0041, 0x0042, 'it', 0xe154),   # A-B
            (0x0043, 0x0043, 'it', 0x2102),   # C
            (0x0044, 0x0044, 'it', 0x2145),   # D
            (0x0045, 0x0047, 'it', 0xe156),   # E-G
            (0x0048, 0x0048, 'it', 0x210d),   # H
            (0x0049, 0x004d, 'it', 0xe159),   # I-M
            (0x004e, 0x004e, 'it', 0x2115),   # N
            (0x004f, 0x004f, 'it', 0xe15e),   # O
            (0x0050, 0x0051, 'it', 0x2119),   # P-Q
            (0x0052, 0x0052, 'it', 0x211d),   # R
            (0x0053, 0x0059, 'it', 0xe15f),   # S-Y
            (0x005a, 0x005a, 'it', 0x2124),   # Z
            (0x0061, 0x0063, 'it', 0xe166),   # a-c
            (0x0064, 0x0065, 'it', 0x2146),   # d-e
            (0x0066, 0x0068, 'it', 0xe169),   # f-h
            (0x0069, 0x006a, 'it', 0x2148),   # i-j
            (0x006b, 0x007a, 'it', 0xe16c),   # k-z
            (0x0393, 0x0393, 'it', 0x213e),   # \Gamma (missing in beta STIX fonts)
            (0x03a0, 0x03a0, 'it', 0x213f),   # \Pi
            (0x03a3, 0x03a3, 'it', 0x2140),   # \Sigma (missing in beta STIX fonts)
            (0x03b3, 0x03b3, 'it', 0x213d),   # \gamma (missing in beta STIX fonts)
            (0x03c0, 0x03c0, 'it', 0x213c),   # \pi
            ],
        'bf':
            [
            (0x0030, 0x0039, 'rm', 0x1d7d8),  # 0-9
            (0x0041, 0x0042, 'bf', 0xe38a),   # A-B
            (0x0043, 0x0043, 'bf', 0x2102),   # C
            (0x0044, 0x0044, 'bf', 0x2145),   # D
            (0x0045, 0x0047, 'bf', 0xe38d),   # E-G
            (0x0048, 0x0048, 'bf', 0x210d),   # H
            (0x0049, 0x004d, 'bf', 0xe390),   # I-M
            (0x004e, 0x004e, 'bf', 0x2115),   # N
            (0x004f, 0x004f, 'bf', 0xe395),   # O
            (0x0050, 0x0051, 'bf', 0x2119),   # P-Q
            (0x0052, 0x0052, 'bf', 0x211d),   # R
            (0x0053, 0x0059, 'bf', 0xe396),   # S-Y
            (0x005a, 0x005a, 'bf', 0x2124),   # Z
            (0x0061, 0x0063, 'bf', 0xe39d),   # a-c
            (0x0064, 0x0065, 'bf', 0x2146),   # d-e
            (0x0066, 0x0068, 'bf', 0xe3a2),   # f-h
            (0x0069, 0x006a, 'bf', 0x2148),   # i-j
            (0x006b, 0x007a, 'bf', 0xe3a7),   # k-z
            (0x0393, 0x0393, 'bf', 0x213e),   # \Gamma
            (0x03a0, 0x03a0, 'bf', 0x213f),   # \Pi
            (0x03a3, 0x03a3, 'bf', 0x2140),   # \Sigma
            (0x03b3, 0x03b3, 'bf', 0x213d),   # \gamma
            (0x03c0, 0x03c0, 'bf', 0x213c),   # \pi
            ],
        },
    'cal':
        [
        (0x0041, 0x005a, 'it', 0xe22d),  # A-Z
        ],
    'circled':
        {
        'rm':
            [
            (0x0030, 0x0030, 'rm', 0x24ea),  # 0
            (0x0031, 0x0039, 'rm', 0x2460),  # 1-9
            (0x0041, 0x005a, 'rm', 0x24b6),  # A-Z
            (0x0061, 0x007a, 'rm', 0x24d0)   # a-z
            ],
        'it':
            [
            (0x0030, 0x0030, 'rm', 0x24ea),  # 0
            (0x0031, 0x0039, 'rm', 0x2460),  # 1-9
            (0x0041, 0x005a, 'it', 0x24b6),  # A-Z
            (0x0061, 0x007a, 'it', 0x24d0)   # a-z
            ],
        'bf':
            [
            (0x0030, 0x0030, 'bf', 0x24ea),  # 0
            (0x0031, 0x0039, 'bf', 0x2460),  # 1-9
            (0x0041, 0x005a, 'bf', 0x24b6),  # A-Z
            (0x0061, 0x007a, 'bf', 0x24d0)   # a-z
            ],
        },
    'frak':
        {
        'rm':
            [
            (0x0041, 0x0042, 'rm', 0x1d504),  # A-B
            (0x0043, 0x0043, 'rm', 0x212d),   # C
            (0x0044, 0x0047, 'rm', 0x1d507),  # D-G
            (0x0048, 0x0048, 'rm', 0x210c),   # H
            (0x0049, 0x0049, 'rm', 0x2111),   # I
            (0x004a, 0x0051, 'rm', 0x1d50d),  # J-Q
            (0x0052, 0x0052, 'rm', 0x211c),   # R
            (0x0053, 0x0059, 'rm', 0x1d516),  # S-Y
            (0x005a, 0x005a, 'rm', 0x2128),   # Z
            (0x0061, 0x007a, 'rm', 0x1d51e),  # a-z
            ],
        'it':
            [
            # Fraktur has no italic variant; reuse the upright glyphs.
            (0x0041, 0x0042, 'rm', 0x1d504),  # A-B
            (0x0043, 0x0043, 'rm', 0x212d),   # C
            (0x0044, 0x0047, 'rm', 0x1d507),  # D-G
            (0x0048, 0x0048, 'rm', 0x210c),   # H
            (0x0049, 0x0049, 'rm', 0x2111),   # I
            (0x004a, 0x0051, 'rm', 0x1d50d),  # J-Q
            (0x0052, 0x0052, 'rm', 0x211c),   # R
            (0x0053, 0x0059, 'rm', 0x1d516),  # S-Y
            (0x005a, 0x005a, 'rm', 0x2128),   # Z
            (0x0061, 0x007a, 'rm', 0x1d51e),  # a-z
            ],
        'bf':
            [
            (0x0041, 0x005a, 'bf', 0x1d56c),  # A-Z
            (0x0061, 0x007a, 'bf', 0x1d586),  # a-z
            ],
        },
    'scr':
        [
        (0x0041, 0x0041, 'it', 0x1d49c),  # A
        (0x0042, 0x0042, 'it', 0x212c),   # B
        (0x0043, 0x0044, 'it', 0x1d49e),  # C-D
        (0x0045, 0x0046, 'it', 0x2130),   # E-F
        (0x0047, 0x0047, 'it', 0x1d4a2),  # G
        (0x0048, 0x0048, 'it', 0x210b),   # H
        (0x0049, 0x0049, 'it', 0x2110),   # I
        (0x004a, 0x004b, 'it', 0x1d4a5),  # J-K
        (0x004c, 0x004c, 'it', 0x2112),   # L
        # Fixed: end of range was 0x003d ('='), below the start 0x004d,
        # which made this an empty range; the entry maps the single
        # letter M.
        (0x004d, 0x004d, 'it', 0x2133),   # M
        (0x004e, 0x0051, 'it', 0x1d4a9),  # N-Q
        (0x0052, 0x0052, 'it', 0x211b),   # R
        (0x0053, 0x005a, 'it', 0x1d4ae),  # S-Z
        (0x0061, 0x0064, 'it', 0x1d4b6),  # a-d
        (0x0065, 0x0065, 'it', 0x212f),   # e
        (0x0066, 0x0066, 'it', 0x1d4bb),  # f
        (0x0067, 0x0067, 'it', 0x210a),   # g
        (0x0068, 0x006e, 'it', 0x1d4bd),  # h-n
        (0x006f, 0x006f, 'it', 0x2134),   # o
        (0x0070, 0x007a, 'it', 0x1d4c5),  # p-z
        ],
    'sf':
        {
        'rm':
            [
            (0x0030, 0x0039, 'rm', 0x1d7e2),  # 0-9
            (0x0041, 0x005a, 'rm', 0x1d5a0),  # A-Z
            (0x0061, 0x007a, 'rm', 0x1d5ba),  # a-z
            (0x0391, 0x03a9, 'rm', 0xe17d),   # \Alpha-\Omega
            (0x03b1, 0x03c9, 'rm', 0xe196),   # \alpha-\omega
            (0x03d1, 0x03d1, 'rm', 0xe1b0),   # theta variant
            (0x03d5, 0x03d5, 'rm', 0xe1b1),   # phi variant
            (0x03d6, 0x03d6, 'rm', 0xe1b3),   # pi variant
            (0x03f1, 0x03f1, 'rm', 0xe1b2),   # rho variant
            (0x03f5, 0x03f5, 'rm', 0xe1af),   # lunate epsilon
            (0x2202, 0x2202, 'rm', 0xe17c),   # partial differential
            ],
        'it':
            [
            # These numerals are actually upright.  We don't actually
            # want italic numerals ever.
            (0x0030, 0x0039, 'rm', 0x1d7e2),  # 0-9
            (0x0041, 0x005a, 'it', 0x1d608),  # A-Z
            (0x0061, 0x007a, 'it', 0x1d622),  # a-z
            (0x0391, 0x03a9, 'rm', 0xe17d),   # \Alpha-\Omega
            (0x03b1, 0x03c9, 'it', 0xe1d8),   # \alpha-\omega
            (0x03d1, 0x03d1, 'it', 0xe1f2),   # theta variant
            (0x03d5, 0x03d5, 'it', 0xe1f3),   # phi variant
            (0x03d6, 0x03d6, 'it', 0xe1f5),   # pi variant
            (0x03f1, 0x03f1, 'it', 0xe1f4),   # rho variant
            (0x03f5, 0x03f5, 'it', 0xe1f1),   # lunate epsilon
            ],
        'bf':
            [
            (0x0030, 0x0039, 'bf', 0x1d7ec),  # 0-9
            (0x0041, 0x005a, 'bf', 0x1d5d4),  # A-Z
            (0x0061, 0x007a, 'bf', 0x1d5ee),  # a-z
            (0x0391, 0x03a9, 'bf', 0x1d756),  # \Alpha-\Omega
            (0x03b1, 0x03c9, 'bf', 0x1d770),  # \alpha-\omega
            (0x03d1, 0x03d1, 'bf', 0x1d78b),  # theta variant
            (0x03d5, 0x03d5, 'bf', 0x1d78d),  # phi variant
            (0x03d6, 0x03d6, 'bf', 0x1d78f),  # pi variant
            (0x03f0, 0x03f0, 'bf', 0x1d78c),  # kappa variant
            (0x03f1, 0x03f1, 'bf', 0x1d78e),  # rho variant
            (0x03f5, 0x03f5, 'bf', 0x1d78a),  # lunate epsilon
            (0x2202, 0x2202, 'bf', 0x1d789),  # partial differential
            (0x2207, 0x2207, 'bf', 0x1d76f),  # \Nabla
            ],
        },
    'tt':
        [
        (0x0030, 0x0039, 'rm', 0x1d7f6),  # 0-9
        (0x0041, 0x005a, 'rm', 0x1d670),  # A-Z
        (0x0061, 0x007a, 'rm', 0x1d68a)   # a-z
        ],
    }
| 35.080249 | 82 | 0.339841 |
acdeb3083accec8b9c551c4beef005ac5d96cc83 | 6,139 | py | Python | sphinx/util/jsdump.py | ocegidalm/sphinx-francais | fd9ad53bb756244e0417a013ede70757c004212d | [
"BSD-2-Clause"
] | null | null | null | sphinx/util/jsdump.py | ocegidalm/sphinx-francais | fd9ad53bb756244e0417a013ede70757c004212d | [
"BSD-2-Clause"
] | null | null | null | sphinx/util/jsdump.py | ocegidalm/sphinx-francais | fd9ad53bb756244e0417a013ede70757c004212d | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
sphinx.util.jsdump
~~~~~~~~~~~~~~~~~~
This module implements a simple JavaScript serializer.
Uses the basestring encode function from simplejson by Bob Ippolito.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from six import integer_types, string_types
if False:
# For type annotation
from typing import Any, Dict, IO, List, Match, Union # NOQA
_str_re = re.compile(r'"(\\\\|\\"|[^"])*"')
_int_re = re.compile(r'\d+')
_name_re = re.compile(r'[a-zA-Z_]\w*')
_nameonly_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*$')
# escape \, ", control characters and everything outside ASCII
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
ESCAPE_DICT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
ESCAPED = re.compile(r'\\u.{4}|\\.')
def encode_string(s):
# type: (str) -> str
def replace(match):
# type: (Match) -> unicode
s = match.group(0)
try:
return ESCAPE_DICT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' # type: ignore
def decode_string(s):
# type: (str) -> str
return ESCAPED.sub(lambda m: eval('"' + m.group() + '"'), s)
# JavaScript reserved words (keywords plus future reserved words).
# dumps() must quote any mapping key that collides with one of these
# instead of emitting it as a bare identifier.
reswords = set("""\
abstract else instanceof switch
boolean enum int synchronized
break export interface this
byte extends long throw
case false native throws
catch final new transient
char finally null true
class float package try
const for private typeof
continue function protected var
debugger goto public void
default if return volatile
delete implements short while
do import static with
double in super""".split())
def dumps(obj, key=False):
    # type: (Any, bool) -> str
    """Serialize *obj* to the JavaScript subset understood by loads().

    When *key* is true, *obj* is being used as a mapping key: it is
    coerced to a string and emitted as a bare identifier when legal
    (not a JS reserved word), otherwise quoted.
    """
    if key:
        if not isinstance(obj, string_types):
            obj = str(obj)
        if _nameonly_re.match(obj) and obj not in reswords:
            return obj  # return it as a bare word
        return encode_string(obj)
    if obj is None:
        return 'null'
    # Booleans must be tested before the integer branch (bool is int).
    if obj is True or obj is False:
        return 'true' if obj else 'false'
    if isinstance(obj, integer_types + (float,)):  # type: ignore
        return str(obj)
    if isinstance(obj, dict):
        # Sort the rendered pairs for deterministic output.
        pairs = ['%s:%s' % (dumps(k, True), dumps(v))
                 for k, v in obj.items()]
        return '{%s}' % ','.join(sorted(pairs))
    if isinstance(obj, set):
        return '[%s]' % ','.join(sorted(dumps(entry) for entry in obj))
    if isinstance(obj, (tuple, list)):
        return '[%s]' % ','.join(dumps(entry) for entry in obj)
    if isinstance(obj, string_types):
        return encode_string(obj)  # type: ignore
    raise TypeError(type(obj))
def dump(obj, f):
    # type: (Any, IO) -> None
    """Serialize *obj* and write the result to file object *f*."""
    serialized = dumps(obj)
    f.write(serialized)
def loads(x):
    # type: (str) -> Any
    """Loader that can read the JS subset the indexer produces.

    Single-pass, stack-based parser: '{' and '[' push a new container,
    closers pop it and attach it to its parent; scalars are matched
    with the module-level regexes (_str_re / _int_re / _name_re).
    """
    nothing = object()  # sentinel: "no value yet" (None is a valid value)
    i = 0
    n = len(x)
    stack = []  # type: List[Union[List, Dict]]  # open containers
    obj = nothing  # type: Any  # container currently being filled
    key = False  # True while a dict key (not a value) is expected
    keys = []  # pending key for each open container on the stack
    while i < n:
        c = x[i]
        if c == '{':
            obj = {}
            stack.append(obj)
            key = True
            keys.append(nothing)
            i += 1
        elif c == '[':
            obj = []
            stack.append(obj)
            key = False
            keys.append(nothing)
            i += 1
        elif c in '}]':
            # NOTE(review): '}' and ']' are handled interchangeably, so
            # mismatched brackets are accepted — lenient by design for
            # the indexer's own output.
            if key:
                if keys[-1] is not nothing:
                    raise ValueError("unfinished dict")
                # empty dict
                key = False
            oldobj = stack.pop()
            keys.pop()
            if stack:
                # Attach the finished container to its parent.
                obj = stack[-1]
                if isinstance(obj, dict):
                    if keys[-1] is nothing:
                        raise ValueError("invalid key object", oldobj)
                    obj[keys[-1]] = oldobj
                else:
                    obj.append(oldobj)
            else:
                # Top-level container closed: stop scanning.
                break
            i += 1
        elif c == ',':
            if key:
                raise ValueError("multiple keys")
            if isinstance(obj, dict):
                key = True
            i += 1
        elif c == ':':
            if not isinstance(obj, dict):
                raise ValueError("colon in list")
            i += 1
            if not key:
                raise ValueError("multiple values")
            key = False
        else:
            # Scalar: quoted string, integer, or bare word (true/false/
            # null, or an unquoted dict key).
            y = None  # type: Any
            m = _str_re.match(x, i)
            if m:
                y = decode_string(m.group()[1:-1])
            else:
                m = _int_re.match(x, i)
                if m:
                    y = int(m.group())
                else:
                    m = _name_re.match(x, i)
                    if m:
                        y = m.group()
                        if y == 'true':
                            y = True
                        elif y == 'false':
                            y = False
                        elif y == 'null':
                            y = None
                        elif not key:
                            raise ValueError("bareword as value")
                    else:
                        raise ValueError("read error at pos %d" % i)
            i = m.end()
            if isinstance(obj, dict):
                if key:
                    keys[-1] = y
                else:
                    obj[keys[-1]] = y
                key = False
            else:
                obj.append(y)
    if obj is nothing:
        raise ValueError("nothing loaded from string")
    return obj
def load(f):
    # type: (IO) -> Any
    """Deserialize from the readable file object *f*."""
    content = f.read()
    return loads(content)
| 28.686916 | 72 | 0.460336 |
acdeb3691a65b2817e30d23e8069b520bc047e63 | 315 | py | Python | movie_rating/movies/templatetags/movies_tags.py | AnwarRezk/Graduation-Project-part-1 | 4ebfd3390e4a827744ea2dc15c61f6922841601d | [
"MIT"
] | null | null | null | movie_rating/movies/templatetags/movies_tags.py | AnwarRezk/Graduation-Project-part-1 | 4ebfd3390e4a827744ea2dc15c61f6922841601d | [
"MIT"
] | null | null | null | movie_rating/movies/templatetags/movies_tags.py | AnwarRezk/Graduation-Project-part-1 | 4ebfd3390e4a827744ea2dc15c61f6922841601d | [
"MIT"
] | 1 | 2021-03-26T10:50:21.000Z | 2021-03-26T10:50:21.000Z | from django import template
register = template.Library()


@register.simple_tag
def url_replace(request, field, value):
    """Return the current query string with *field* set to *value*."""
    params = request.GET.copy()
    params[field] = value
    return params.urlencode()
@register.simple_tag
def url_delete(request, field):
    """Return the current query string with *field* removed.

    Raises KeyError when *field* is not present in the query string.
    """
    params = request.GET.copy()
    del params[field]
    return params.urlencode()
acdeb43357ab00e79d7e2caf10a8416ef98a0567 | 478 | py | Python | examples/simple_variable_robot.py | SatoshiIwasada/BlueDot | e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a | [
"MIT"
] | 112 | 2017-03-27T17:23:17.000Z | 2022-03-13T09:51:43.000Z | examples/simple_variable_robot.py | SatoshiIwasada/BlueDot | e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a | [
"MIT"
] | 109 | 2017-03-29T11:19:54.000Z | 2022-02-03T14:18:15.000Z | examples/simple_variable_robot.py | SatoshiIwasada/BlueDot | e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a | [
"MIT"
] | 40 | 2017-03-30T23:23:27.000Z | 2022-01-21T17:09:11.000Z | from bluedot import BlueDot
from gpiozero import Robot
from signal import pause
bd = BlueDot()  # Blue Dot Bluetooth remote; blocks until a client pairs
# Differential-drive robot: left motor on GPIO 10/9, right on GPIO 8/7.
robot = Robot(left=(10, 9), right=(8, 7))
def move(pos):
    """Drive the robot toward the Blue Dot press position.

    The distance from the dot's centre (0..1) is used directly as the
    motor speed, so presses near the edge move faster.
    """
    speed = pos.distance
    if pos.top:
        robot.forward(speed)
    elif pos.bottom:
        robot.backward(speed)
    elif pos.left:
        robot.left(speed)
    elif pos.right:
        robot.right(speed)
def stop():
    # Halt both motors when the Blue Dot is released.
    robot.stop()
# Wire Blue Dot events to the motor handlers.
bd.when_pressed = move
bd.when_moved = move
bd.when_released = stop
# Block forever; all work happens in the Blue Dot callbacks.
pause()
| 18.384615 | 41 | 0.66318 |
acdeb4ec27369fa1e92a405a07d259e08554c045 | 1,057 | py | Python | setup.py | Blankpanda/GE_wrapper | 3ead6eb46c594ac6a246e6614795d1c5f36cbe96 | [
"MIT"
] | 1 | 2019-06-05T22:56:19.000Z | 2019-06-05T22:56:19.000Z | setup.py | Blankpanda/GE_wrapper | 3ead6eb46c594ac6a246e6614795d1c5f36cbe96 | [
"MIT"
] | 1 | 2016-02-27T06:31:24.000Z | 2019-04-24T17:04:26.000Z | setup.py | Blankpanda/GEw | 3ead6eb46c594ac6a246e6614795d1c5f36cbe96 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
from os import path
# Package metadata for the gew (Grand Exchange wrapper) distribution.
setup(
    name='gew',
    version='0.0.17',
    description='Old School RuneScape Grand Exchange wrapper',
    long_description='',
    url='https://github.com/Blankpanda/GE_wrapper',
    author='Caleb Ellis',
    author_email='elliscaleb1998@gmail.com',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='api development',
    packages=find_packages(exclude=['examples', 'scripts']),
    include_package_data=True,
    # No runtime dependencies.  Was [''] — an empty requirement string
    # is not a valid specifier; use an empty list instead.
    install_requires=[],
    package_data={'gew': ['items.json']},
    data_files=['gew/items.json']
)
| 30.2 | 62 | 0.630085 |
acdeb5274d2c50da87f81181774349ab7a26267b | 13,450 | py | Python | simpleoncall/views.py | simpleoncall/simpleoncall | ffc247045c7ce357871899c84fdfc61f4add06a9 | [
"MIT"
] | 1 | 2016-01-11T21:37:44.000Z | 2016-01-11T21:37:44.000Z | simpleoncall/views.py | simpleoncall/simpleoncall | ffc247045c7ce357871899c84fdfc61f4add06a9 | [
"MIT"
] | 48 | 2015-01-04T16:04:20.000Z | 2015-01-25T20:53:49.000Z | simpleoncall/views.py | simpleoncall/simpleoncall | ffc247045c7ce357871899c84fdfc61f4add06a9 | [
"MIT"
] | null | null | null | import contextlib
import datetime
import json
import StringIO
from django.contrib.auth import logout as logout_user
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect, JsonResponse, Http404, HttpResponse, HttpResponseBadRequest
from django.db.models import Count
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.http import urlencode, urlquote
from django_ical import feedgenerator
from simpleoncall.forms.auth import AuthenticationForm, RegistrationForm
from simpleoncall.forms.account import EditAccountForm, ChangePasswordForm
from simpleoncall.forms.schedule import TeamScheduleForm
from simpleoncall.forms.team import CreateTeamForm, SelectTeamForm, InviteTeamForm
from simpleoncall.decorators import require_authentication, require_selected_team
from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule
from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType
@require_authentication()
@require_selected_team()
def dashboard(request):
    """Team dashboard: recent alerts, status totals and a 12h histogram.

    Builds an hourly time series over the last 12 hours keyed by epoch
    seconds.  NOTE(review): strftime('%s') is a platform extension (not
    available on Windows) — confirm the deployment target.
    """
    end = timezone.now()
    start = end - datetime.timedelta(hours=12)
    date_added__range = (start, end)
    # Ten most recent alerts for the list widget.
    alerts = Alert.objects.filter(team=request.team, date_added__range=date_added__range).order_by('-date_added')[:10]
    # Per-status totals for the summary counters.
    alert_statuses = Alert.objects.filter(
        team=request.team, date_added__range=date_added__range
    ).values('status').annotate(total=Count('status'))
    # Per-timestamp counts, bucketed into hours below.
    alert_times = Alert.objects.filter(
        team=request.team, date_added__range=date_added__range
    ).values('date_added').annotate(total=Count('date_added')).order_by('-date_added')
    # Pre-seed every hourly bucket with 0 so gaps render as zero bars.
    alert_timeseries = {}
    while start <= end:
        # Floor to the start of the hour (minute % 60 == minute here).
        bucket = start - datetime.timedelta(minutes=start.minute % 60,
                                            seconds=start.second,
                                            microseconds=start.microsecond)
        alert_timeseries[bucket.strftime('%s')] = 0
        start += datetime.timedelta(minutes=60)
    # Accumulate alert counts into their hourly buckets.
    for alert in alert_times:
        added = alert['date_added']
        bucket = added - datetime.timedelta(minutes=added.minute % 60,
                                            seconds=added.second,
                                            microseconds=added.microsecond)
        alert_timeseries[bucket.strftime('%s')] += alert['total']
    context = {
        'title': 'Dashboard',
        'alerts': alerts,
        'statuses': dict((a['status'], a['total']) for a in alert_statuses),
        'timeseries': json.dumps(alert_timeseries),
    }
    return render(request, 'dashboard.html', context)
def login(request):
    """Render the combined login/registration page, login tab active."""
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse('dashboard'))
    return render(request, 'login.html', {
        'login_form': AuthenticationForm(),
        'register_form': RegistrationForm(),
        'login': True,
        'title': 'Login',
    })
def register(request):
    """Render the combined login/registration page, signup tab active.

    The optional ``?next=`` parameter is passed through (URL-quoted) so
    the user lands on the originally requested page after registering.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse('dashboard'))
    context = {
        'login_form': AuthenticationForm(),
        'register_form': RegistrationForm(),
        'register': True,
        'title': 'Register',
        # Default to '' so a missing ?next= does not reach the template
        # as urlquote(None), which renders the literal string 'None'.
        'next': urlquote(request.GET.get('next', '')),
    }
    return render(request, 'login.html', context)
def logout(request):
    """Log the current user out and redirect to the login page."""
    logout_user(request)
    return HttpResponseRedirect(reverse('login'))
@require_authentication()
@require_selected_team()
def settings(request):
    """Team settings page: API keys, members and the invite form."""
    team = request.team
    context = {
        'title': '%s Settings' % (team.name, ),
        'api_keys': APIKey.objects.filter(team=team),
        'members': TeamMember.objects.filter(team=team),
        'invite_team_form': InviteTeamForm(),
    }
    return render(request, 'settings.html', context)
@require_authentication()
@require_selected_team()
def account(request):
    """Account page: profile form, password form, notification settings."""
    notification_settings = request.user.get_notification_settings()
    if not notification_settings:
        # Show a single default e-mail notification placeholder when the
        # user has not configured anything yet.
        notification_settings = [
            NotificationSetting(id=0, type=NotificationType.EMAIL, time=0)
        ]
    return render(request, 'account.html', {
        'title': 'Account',
        'edit_account_form': EditAccountForm(instance=request.user),
        'change_password_form': ChangePasswordForm(instance=request.user),
        'alerts': notification_settings,
    })
@require_authentication()
@require_selected_team()
def alerts(request):
    """List the ten most recently updated alerts for the selected team."""
    team_alerts = Alert.objects.filter(team=request.team)
    context = {
        'title': 'Alerts',
        'alert_count': team_alerts.count(),
        'alerts': team_alerts.order_by('-date_updated')[:10],
    }
    return render(request, 'alerts.html', context)
@require_authentication()
@require_selected_team()
def schedule(request):
    """Show the active on-call schedule and who is currently on call."""
    active_schedule = request.team.get_active_schedule()
    oncall = active_schedule.get_currently_on_call() if active_schedule else None
    return render(request, 'schedule.html', {
        'title': 'Schedule',
        'schedule': active_schedule,
        'oncall': oncall,
    })
@require_authentication(require_team=False)
def create_team(request):
    """Create a new team; on success select it and go to the dashboard."""
    form = CreateTeamForm(request.POST or None)
    if form.is_valid():
        team = form.save(request)
        messages.success(request, 'New team %s created' % team.name)
        return HttpResponseRedirect(reverse('dashboard'))
    return render(request, 'team/create.html', {
        'title': 'Create New Team',
        'create_team_form': form,
    })
@require_authentication()
def select_team(request):
    """Switch the user's currently selected team."""
    form = SelectTeamForm(request.POST or None, request.user)
    if form.is_valid():
        team = form.save(request)
        messages.success(request, 'Team changed to %s' % team.name)
        return HttpResponseRedirect(reverse('dashboard'))
    return render(request, 'team/select.html', {
        'title': 'Select Team',
        'select_team_form': form,
    })
def invite_accept(request):
    """Accept a team invite identified by ``?code=...&email=...``.

    Existing users are added to the team directly; unknown addresses
    are redirected to registration with the invite encoded in ``next``.

    Fix: ``QuerySet.get`` raises ``DoesNotExist`` rather than returning
    None, so the original ``if not invite`` / unknown-user branches were
    unreachable and a missing invite or user produced a 500.
    """
    code = request.GET.get('code')
    email = request.GET.get('email')
    if not code or not email:
        return HttpResponseRedirect(reverse('dashboard'))
    try:
        invite = TeamInvite.objects.get(invite_code=code, email=email)
    except ObjectDoesNotExist:
        invite = None
    if not invite:
        return HttpResponseRedirect(reverse('dashboard'))
    try:
        user = User.objects.get(email=email)
    except ObjectDoesNotExist:
        user = None
    if user:
        try:
            team_member = TeamMember.objects.get(team=invite.team, user=user)
        except ObjectDoesNotExist:
            team_member = None
        if team_member:
            messages.warning(request, 'already a member of team %s' % (invite.team.name, ))
        else:
            team_member = TeamMember(team=invite.team, user=user)
            team_member.save()
            messages.success(request, 'added to team %s' % (invite.team.name, ))
    else:
        # Unknown email: send through registration, preserving the
        # invite so it is accepted automatically afterwards.
        args = {
            'code': code,
            'email': email,
        }
        next = '%s?%s' % (reverse('invite-accept'), urlencode(args))
        redirect = '%s?next=%s' % (reverse('register'), urlquote(next))
        return HttpResponseRedirect(redirect)
    return HttpResponseRedirect(reverse('dashboard'))
@require_authentication()
@require_selected_team()
def alert_ack(request, alert_id):
    """Mark an alert as acknowledged and redirect back to the list.

    Fixes: filter by the selected team so users cannot acknowledge
    other teams' alerts (consistent with alert_resolve/alert_view), and
    handle ``DoesNotExist`` — ``.get`` raises rather than returning
    None, so the not-found branch was previously unreachable.
    """
    try:
        alert = Alert.objects.get(id=alert_id, team=request.team)
    except ObjectDoesNotExist:
        alert = None
    if not alert:
        messages.error(request, 'Alert %s was not found' % (alert_id, ))
    elif alert.status == EventStatus.ACKNOWLEDGED:
        messages.warning(request, 'Alert %s already acknowledged' % (alert_id, ))
    else:
        alert.status = EventStatus.ACKNOWLEDGED
        alert.save(user=request.user)
        messages.success(request, 'Alert %s was acknowledged' % (alert_id, ))
    return HttpResponseRedirect(reverse('alerts'))
@require_authentication()
@require_selected_team()
def alert_resolve(request, alert_id):
    """Mark an alert as resolved and redirect back to the list.

    Fix: handle ``DoesNotExist`` — ``.get`` raises rather than
    returning None, so the not-found branch was previously unreachable.
    """
    try:
        alert = Alert.objects.get(id=alert_id, team=request.team)
    except ObjectDoesNotExist:
        alert = None
    if not alert:
        messages.error(request, 'Alert %s was not found' % (alert_id, ))
    elif alert.status == EventStatus.RESOLVED:
        messages.warning(request, 'Alert %s already resolved' % (alert_id, ))
    else:
        alert.status = EventStatus.RESOLVED
        alert.save(user=request.user)
        messages.success(request, 'Alert %s was resolved' % (alert_id, ))
    return HttpResponseRedirect(reverse('alerts'))
@require_authentication()
@require_selected_team()
def alert_view(request, alert_id):
    """Show the detail page for a single alert.

    Fix: handle ``DoesNotExist`` — ``.get`` raises rather than
    returning None, so the not-found redirect was previously
    unreachable and a bad id produced a 500.
    """
    try:
        alert = Alert.objects.get(id=alert_id, team=request.team)
    except ObjectDoesNotExist:
        alert = None
    if not alert:
        messages.error(request, 'Alert %s was not found' % (alert_id, ))
        return HttpResponseRedirect(reverse('dashboard'))
    context = {
        'title': alert.title,
        'event': alert,
    }
    return render(request, 'alert.html', context)
@require_authentication()
@require_selected_team()
def edit_schedule(request):
    """Create a new schedule or update an existing one.

    A POST with ``schedule_id`` updates that schedule; a POST without
    it creates a new one.  One bound/unbound form is built per existing
    schedule so they can all be rendered on the same page.
    """
    msg = None
    schedule_id = None
    if 'schedule_id' in request.POST:
        schedule_id = int(request.POST['schedule_id'])
    # Unsaved placeholder instance backing the "new schedule" form.
    dummy_schedule = TeamSchedule(team=request.team)
    # Only bind POST data to the new-schedule form when the POST is not
    # targeting an existing schedule.
    data = None if schedule_id else request.POST or None
    new_schedule_form = TeamScheduleForm(request.team, data, instance=dummy_schedule)
    saved = False
    if request.method == 'POST' and not schedule_id:
        if new_schedule_form.is_valid():
            new_schedule_form.save()
            saved = True
            msg = 'New Schedule Added'
    schedule_forms = []
    for schedule in request.team.get_schedules():
        # Bind POST data only to the schedule being edited.
        data = None
        if schedule.id == schedule_id:
            data = request.POST
        schedule_form = TeamScheduleForm(request.team, data, instance=schedule)
        if data and schedule_form.is_valid():
            schedule_form.save()
            msg = 'Schedule Updated'
        schedule_forms.append(schedule_form)
    if msg:
        messages.success(request, msg)
    context = {
        'title': 'Edit Schedule',
        'active_schedule': request.team.get_active_schedule(),
        'schedule_forms': schedule_forms,
        'new_schedule_form': new_schedule_form,
        # Keep the new-schedule form collapsed unless a failed POST for
        # it should remain visible for correction.
        'hidden_schedule_form': not saved or request.method != 'POST',
    }
    return render(request, 'edit_schedule.html', context)
@require_authentication()
@require_selected_team()
def delete_schedule(request):
    """Delete the schedule named by ``?id=`` and return to the editor.

    Fix: ``.get`` raises ``DoesNotExist`` for an unknown id, which
    previously produced a 500 instead of an error message.
    NOTE(review): this destructive action is triggered by GET — it
    should ideally require POST/CSRF; left unchanged to keep the URL
    contract intact.
    """
    id = request.GET.get('id')
    if id:
        try:
            schedule = TeamSchedule.objects.get(team=request.team, id=id)
            schedule.delete()
            messages.success(request, 'Schedule %s Deleted' % (schedule.name, ))
        except ObjectDoesNotExist:
            messages.error(request, 'Unknown Schedule Id')
    else:
        messages.error(request, 'Unknown Schedule Id')
    return HttpResponseRedirect(reverse('edit-schedule'))
@require_authentication()
@require_selected_team()
def partial(request, partial):
    """Render a named template partial and return its HTML as JSON.

    NOTE(review): *partial* comes straight from the URL and is
    interpolated into a template name; Django's loaders reject paths
    that escape the template dirs, but the URL pattern should still
    restrict the allowed names — confirm.
    """
    template_name = 'partials/%s.html' % (partial, )
    html = render_to_string(template_name, {
        'request': request,
        'user': request.user,
        'team': request.team,
    })
    return JsonResponse({'html': html})
@require_authentication()
@require_selected_team()
def team_calendar(request):
    """Serve the active schedule's next 90 days as an iCal feed.

    Walks forward one day at a time and emits one VEVENT per contiguous
    on-call span (a new event starts whenever the on-call user changes).

    Fix: ``Http404`` is an exception and must be raised, not returned —
    returning it made Django fail with an invalid response instead of a
    404.  Also corrects the 'Unkown' typo in its message.
    NOTE(review): this function uses ``xrange``, i.e. the module
    targets Python 2 — confirm before porting.
    """
    schedule = request.team.get_active_schedule()
    if not schedule:
        raise Http404('Unknown Calendar')
    feed = feedgenerator.ICal20Feed(
        title='Team %s On-Call Schedule %s' % (request.team.name, schedule.name),
        link=request.build_absolute_uri(request.path),
        description='Team %s On-Call Schedule %s' % (request.team.name, schedule.name),
        language='en',
        subtitle='Generated by SimpleOnCall',
        author_email='service@simpleoncall.com',
        author_link='http://simpleoncall.com',
        author_name='SimpleOnCall',
        feed_url=request.build_absolute_uri(request.path)
    )
    now = timezone.now()
    # Anchor the walk at today's rotation start hour (UTC).
    starting_time = datetime.datetime(now.year, now.month, now.day, schedule.starting_time, tzinfo=timezone.utc)
    next_start_time = None
    currently_oncall = None
    for i in xrange(90):
        now = starting_time + datetime.timedelta(days=i)
        oncall = schedule.get_currently_on_call(now)
        if next_start_time is None:
            # First day: open the initial on-call span.
            next_start_time = now
            currently_oncall = oncall
        elif currently_oncall.id != oncall.id:
            # On-call user changed: close out the previous span.
            feed.add_item(
                title='%s On-Call' % (oncall.get_display_name(), ),
                link=request.build_absolute_uri(reverse('schedule')),
                description='%s On-Call' % (currently_oncall.get_display_name(), ),
                start_datetime=next_start_time,
                end_datetime=now
            )
            next_start_time = now
            currently_oncall = oncall
    # Emit the final, still-open span.
    feed.add_item(
        title='%s On-Call' % (oncall.get_display_name(), ),
        link=request.build_absolute_uri(reverse('schedule')),
        description='%s On-Call' % (currently_oncall.get_display_name(), ),
        start_datetime=next_start_time,
        end_datetime=now
    )
    results = None
    with contextlib.closing(StringIO.StringIO()) as output:
        feed.write(output, 'utf-8')
        results = output.getvalue()
    if results is not None:
        return HttpResponse(results, content_type='text/calendar; charset=utf-8')
    return HttpResponseBadRequest('Could not generate iCal at this time')
| 34.311224 | 118 | 0.667658 |
acdeb5cbb9e00c545f90adf201bd7eca9a4596e3 | 28,892 | py | Python | source/deepsecurity/api/computer_intrusion_prevention_application_type_details_api.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:09.000Z | 2021-10-30T16:40:09.000Z | source/deepsecurity/api/computer_intrusion_prevention_application_type_details_api.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-07-28T20:19:03.000Z | 2021-07-28T20:19:03.000Z | source/deepsecurity/api/computer_intrusion_prevention_application_type_details_api.py | felipecosta09/cloudone-workload-controltower-lifecycle | 7927c84d164058b034fc872701b5ee117641f4d1 | [
"Apache-2.0"
] | 1 | 2021-10-30T16:40:02.000Z | 2021-10-30T16:40:02.000Z | # coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class ComputerIntrusionPreventionApplicationTypeDetailsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def describe_intrusion_prevention_application_type_on_computer(self, computer_id, application_type_id, api_version, **kwargs): # noqa: E501
"""Describe an intrusion prevention application type # noqa: E501
Describe an intrusion prevention application type including computer-level overrides. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_intrusion_prevention_application_type_on_computer(computer_id, application_type_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param int application_type_id: The ID number of the application type. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current computer.
:return: ApplicationType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, **kwargs) # noqa: E501
return data
    def describe_intrusion_prevention_application_type_on_computer_with_http_info(self, computer_id, application_type_id, api_version, **kwargs):  # noqa: E501
        """Describe an intrusion prevention application type  # noqa: E501

        Describe an intrusion prevention application type including computer-level overrides.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.describe_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int computer_id: The ID number of the computer. (required)
        :param int application_type_id: The ID number of the application type. (required)
        :param str api_version: The version of the api being called. (required)
        :param bool overrides: Show only overrides defined for the current computer.
        :return: ApplicationType
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Auto-generated (swagger-codegen): validate arguments, build the
        # request pieces, then delegate to ApiClient.call_api.
        all_params = ['computer_id', 'application_type_id', 'api_version', 'overrides']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Reject any keyword argument not declared for this endpoint.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method describe_intrusion_prevention_application_type_on_computer" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'computer_id' is set
        if ('computer_id' not in params or
                params['computer_id'] is None):
            raise ValueError("Missing the required parameter `computer_id` when calling `describe_intrusion_prevention_application_type_on_computer`")  # noqa: E501
        # verify the required parameter 'application_type_id' is set
        if ('application_type_id' not in params or
                params['application_type_id'] is None):
            raise ValueError("Missing the required parameter `application_type_id` when calling `describe_intrusion_prevention_application_type_on_computer`")  # noqa: E501
        # verify the required parameter 'api_version' is set
        if ('api_version' not in params or
                params['api_version'] is None):
            raise ValueError("Missing the required parameter `api_version` when calling `describe_intrusion_prevention_application_type_on_computer`")  # noqa: E501

        # Both path IDs must contain digits (pattern /\d+/).
        if 'computer_id' in params and not re.search('\\d+', str(params['computer_id'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `computer_id` when calling `describe_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`")  # noqa: E501
        if 'application_type_id' in params and not re.search('\\d+', str(params['application_type_id'])):  # noqa: E501
            raise ValueError("Invalid value for parameter `application_type_id` when calling `describe_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`")  # noqa: E501
        collection_formats = {}

        # Path, query and header parameters for the request.
        path_params = {}
        if 'computer_id' in params:
            path_params['computerID'] = params['computer_id']  # noqa: E501
        if 'application_type_id' in params:
            path_params['applicationTypeID'] = params['application_type_id']  # noqa: E501

        query_params = []
        if 'overrides' in params:
            query_params.append(('overrides', params['overrides']))  # noqa: E501

        header_params = {}
        if 'api_version' in params:
            header_params['api-version'] = params['api_version']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['DefaultAuthentication']  # noqa: E501

        return self.api_client.call_api(
            '/computers/{computerID}/intrusionprevention/applicationtypes/{applicationTypeID}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ApplicationType',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def list_intrusion_prevention_application_types_on_computer(self, computer_id, api_version, **kwargs):  # noqa: E501
    """List intrusion prevention application types  # noqa: E501

    Lists all intrusion prevention application types assigned to a computer.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to receive
    the request thread instead of the deserialized response body.

    >>> thread = api.list_intrusion_prevention_application_types_on_computer(computer_id, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int computer_id: The ID number of the computer. (required)
    :param str api_version: The version of the api being called. (required)
    :param bool overrides: Show only application types assigned to the current computer.
    :return: ApplicationTypes
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrapper: callers want only the response body, not the full
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths alike simply forward the delegate's return value
    # (the deserialized body, or the request thread when async_req is set).
    return self.list_intrusion_prevention_application_types_on_computer_with_http_info(computer_id, api_version, **kwargs)  # noqa: E501
def list_intrusion_prevention_application_types_on_computer_with_http_info(self, computer_id, api_version, **kwargs):  # noqa: E501
    """List intrusion prevention application types  # noqa: E501

    Lists all intrusion prevention application types assigned to a computer.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the response.

    >>> thread = api.list_intrusion_prevention_application_types_on_computer_with_http_info(computer_id, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int computer_id: The ID number of the computer. (required)
    :param str api_version: The version of the api being called. (required)
    :param bool overrides: Show only application types assigned to the current computer.
    :return: ApplicationTypes
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint-specific kwargs plus the transport options every generated
    # method accepts.
    recognized = (
        'computer_id', 'api_version', 'overrides',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    )
    for keyword in kwargs:
        if keyword not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_intrusion_prevention_application_types_on_computer" % keyword
            )

    # Required parameters must not be None.
    if computer_id is None:
        raise ValueError("Missing the required parameter `computer_id` when calling `list_intrusion_prevention_application_types_on_computer`")  # noqa: E501
    if api_version is None:
        raise ValueError("Missing the required parameter `api_version` when calling `list_intrusion_prevention_application_types_on_computer`")  # noqa: E501
    # The ID must match the server-side pattern /\d+/ (checked unanchored,
    # exactly as the generated code does).
    if not re.search('\\d+', str(computer_id)):
        raise ValueError("Invalid value for parameter `computer_id` when calling `list_intrusion_prevention_application_types_on_computer`, must conform to the pattern `/\\d+/`")  # noqa: E501

    path_params = {'computerID': computer_id}

    query_params = []
    if 'overrides' in kwargs:
        query_params.append(('overrides', kwargs['overrides']))

    header_params = {
        'api-version': api_version,
        'Accept': self.api_client.select_header_accept(['application/json']),
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }

    return self.api_client.call_api(
        '/computers/{computerID}/intrusionprevention/applicationtypes', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ApplicationTypes',  # noqa: E501
        auth_settings=['DefaultAuthentication'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def modify_intrusion_prevention_application_type_on_computer(self, computer_id, application_type_id, application_type, api_version, **kwargs):  # noqa: E501
    """Modify an intrusion prevention application type  # noqa: E501

    Modify an intrusion prevention application type assigned to a computer. Any unset elements will be left unchanged.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to receive
    the request thread instead of the deserialized response body.

    >>> thread = api.modify_intrusion_prevention_application_type_on_computer(computer_id, application_type_id, application_type, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int computer_id: The ID number of the computer. (required)
    :param int application_type_id: The ID number of the application type to modify. (required)
    :param ApplicationType application_type: The settings of the application type to modify. (required)
    :param str api_version: The version of the api being called. (required)
    :param bool overrides: Show only overrides defined for the current computer.
    :return: ApplicationType
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrapper: callers want only the response body, not the full
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths alike simply forward the delegate's return value.
    return self.modify_intrusion_prevention_application_type_on_computer_with_http_info(
        computer_id, application_type_id, application_type, api_version, **kwargs)  # noqa: E501
def modify_intrusion_prevention_application_type_on_computer_with_http_info(self, computer_id, application_type_id, application_type, api_version, **kwargs):  # noqa: E501
    """Modify an intrusion prevention application type  # noqa: E501

    Modify an intrusion prevention application type assigned to a computer. Any unset elements will be left unchanged.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the response.

    >>> thread = api.modify_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, application_type, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int computer_id: The ID number of the computer. (required)
    :param int application_type_id: The ID number of the application type to modify. (required)
    :param ApplicationType application_type: The settings of the application type to modify. (required)
    :param str api_version: The version of the api being called. (required)
    :param bool overrides: Show only overrides defined for the current computer.
    :return: ApplicationType
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint-specific kwargs plus the transport options every generated
    # method accepts.
    recognized = (
        'computer_id', 'application_type_id', 'application_type',
        'api_version', 'overrides',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    )
    for keyword in kwargs:
        if keyword not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method modify_intrusion_prevention_application_type_on_computer" % keyword
            )

    # Required parameters must not be None.
    if computer_id is None:
        raise ValueError("Missing the required parameter `computer_id` when calling `modify_intrusion_prevention_application_type_on_computer`")  # noqa: E501
    if application_type_id is None:
        raise ValueError("Missing the required parameter `application_type_id` when calling `modify_intrusion_prevention_application_type_on_computer`")  # noqa: E501
    if application_type is None:
        raise ValueError("Missing the required parameter `application_type` when calling `modify_intrusion_prevention_application_type_on_computer`")  # noqa: E501
    if api_version is None:
        raise ValueError("Missing the required parameter `api_version` when calling `modify_intrusion_prevention_application_type_on_computer`")  # noqa: E501

    # The IDs must match the server-side pattern /\d+/ (checked unanchored,
    # exactly as the generated code does).
    if not re.search('\\d+', str(computer_id)):
        raise ValueError("Invalid value for parameter `computer_id` when calling `modify_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`")  # noqa: E501
    if not re.search('\\d+', str(application_type_id)):
        raise ValueError("Invalid value for parameter `application_type_id` when calling `modify_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`")  # noqa: E501

    path_params = {
        'computerID': computer_id,
        'applicationTypeID': application_type_id,
    }

    query_params = []
    if 'overrides' in kwargs:
        query_params.append(('overrides', kwargs['overrides']))

    header_params = {
        'api-version': api_version,
        'Accept': self.api_client.select_header_accept(['application/json']),
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }

    return self.api_client.call_api(
        '/computers/{computerID}/intrusionprevention/applicationtypes/{applicationTypeID}', 'POST',
        path_params,
        query_params,
        header_params,
        body=application_type,
        post_params=[],
        files={},
        response_type='ApplicationType',  # noqa: E501
        auth_settings=['DefaultAuthentication'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def reset_intrusion_prevention_application_type_on_computer(self, computer_id, application_type_id, api_version, **kwargs):  # noqa: E501
    """Reset intrusion prevention application type overrides  # noqa: E501

    Remove all overrides for an intrusion prevention application type from a computer.  # noqa: E501

    The call is synchronous by default; pass ``async_req=True`` to receive
    the request thread instead of the deserialized response body.

    >>> thread = api.reset_intrusion_prevention_application_type_on_computer(computer_id, application_type_id, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int computer_id: The ID number of the computer. (required)
    :param int application_type_id: The ID number of the application type to reset. (required)
    :param str api_version: The version of the api being called. (required)
    :param bool overrides: Show only overrides defined for the current computer.
    :return: ApplicationType
             If the method is called asynchronously,
             returns the request thread.
    """
    # Public wrapper: callers want only the response body, not the full
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths alike simply forward the delegate's return value.
    return self.reset_intrusion_prevention_application_type_on_computer_with_http_info(
        computer_id, application_type_id, api_version, **kwargs)  # noqa: E501
def reset_intrusion_prevention_application_type_on_computer_with_http_info(self, computer_id, application_type_id, api_version, **kwargs):  # noqa: E501
    """Reset intrusion prevention application type overrides  # noqa: E501

    Remove all overrides for an intrusion prevention application type from a computer.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the response.

    >>> thread = api.reset_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int computer_id: The ID number of the computer. (required)
    :param int application_type_id: The ID number of the application type to reset. (required)
    :param str api_version: The version of the api being called. (required)
    :param bool overrides: Show only overrides defined for the current computer.
    :return: ApplicationType
             If the method is called asynchronously,
             returns the request thread.
    """
    # Endpoint-specific kwargs plus the transport options every generated
    # method accepts.
    recognized = (
        'computer_id', 'application_type_id', 'api_version', 'overrides',
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    )
    for keyword in kwargs:
        if keyword not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method reset_intrusion_prevention_application_type_on_computer" % keyword
            )

    # Required parameters must not be None.
    if computer_id is None:
        raise ValueError("Missing the required parameter `computer_id` when calling `reset_intrusion_prevention_application_type_on_computer`")  # noqa: E501
    if application_type_id is None:
        raise ValueError("Missing the required parameter `application_type_id` when calling `reset_intrusion_prevention_application_type_on_computer`")  # noqa: E501
    if api_version is None:
        raise ValueError("Missing the required parameter `api_version` when calling `reset_intrusion_prevention_application_type_on_computer`")  # noqa: E501

    # The IDs must match the server-side pattern /\d+/ (checked unanchored,
    # exactly as the generated code does).
    if not re.search('\\d+', str(computer_id)):
        raise ValueError("Invalid value for parameter `computer_id` when calling `reset_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`")  # noqa: E501
    if not re.search('\\d+', str(application_type_id)):
        raise ValueError("Invalid value for parameter `application_type_id` when calling `reset_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`")  # noqa: E501

    path_params = {
        'computerID': computer_id,
        'applicationTypeID': application_type_id,
    }

    query_params = []
    if 'overrides' in kwargs:
        query_params.append(('overrides', kwargs['overrides']))

    header_params = {
        'api-version': api_version,
        'Accept': self.api_client.select_header_accept(['application/json']),
        'Content-Type': self.api_client.select_header_content_type(['application/json']),
    }

    return self.api_client.call_api(
        '/computers/{computerID}/intrusionprevention/applicationtypes/{applicationTypeID}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='ApplicationType',  # noqa: E501
        auth_settings=['DefaultAuthentication'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
| 55.032381 | 311 | 0.664613 |
acdeb619e2772690c3b449779b6e5c418dd401e4 | 1,832 | py | Python | StrToInt.py | Vman45/MyAssistantOS-Raspbian | 7ce9e7ee13e5840ff77a376107f3e5b32793cd1c | [
"MIT"
] | null | null | null | StrToInt.py | Vman45/MyAssistantOS-Raspbian | 7ce9e7ee13e5840ff77a376107f3e5b32793cd1c | [
"MIT"
] | 1 | 2020-08-01T18:10:03.000Z | 2020-08-27T06:15:23.000Z | StrToInt.py | Vman45/MyAssistantOS-Raspbian | 7ce9e7ee13e5840ff77a376107f3e5b32793cd1c | [
"MIT"
] | null | null | null | # Guinea pig 1.0
# MIT License
# Allowed also as a commercial use, but only with crediting author, like "c MyTja Team"
def convert(lst):
    """Fold a list of decimal digits (most significant first) into an int.

    An empty list yields 0, matching sum() over an empty sequence.
    """
    value = 0
    for digit in lst:
        value = value * 10 + digit
    return value
def StrToInt(string):
    """Convert *string* to an integer.

    Accepts either a run of ASCII decimal digits (e.g. "204") or a single
    English number word from "zero" through "ten" (any letter case).

    Returns the integer value on success.  For any other input an
    explanatory error string is returned; this keeps the original
    string-return contract for invalid input instead of raising.

    Fixes over the original implementation:
    - All-digit strings such as "123" used to fall through the word check
      and return the error message, so numerals never converted.
    - Only "zero" was compared case-insensitively; every word is now.
    - Leftover debug print() calls are removed.
    """
    # Number words accepted by the original implementation.
    words = {
        "zero": 0, "one": 1, "two": 2, "three": 3, "four": 4,
        "five": 5, "six": 6, "seven": 7, "eight": 8, "nine": 9, "ten": 10,
    }
    lowered = string.lower()
    if lowered in words:
        return words[lowered]
    # Non-empty and composed purely of ASCII digits, as the original's
    # per-character comparisons intended.
    if string and all(ch in "0123456789" for ch in string):
        return int(string)
    return "This string contains unvalid symbols. Check if you have symbols or unvalid letters in string."
| 27.757576 | 111 | 0.514738 |
acdeb635f707d9d65c51768f6ee5c38906eb472f | 23 | py | Python | src/init.py | Adityan-compile/Wikipedia-Content-Scraper | b78f2db1b9a8710fe0014f96c67b8b9231ce4ca7 | [
"MIT"
] | null | null | null | src/init.py | Adityan-compile/Wikipedia-Content-Scraper | b78f2db1b9a8710fe0014f96c67b8b9231ce4ca7 | [
"MIT"
] | null | null | null | src/init.py | Adityan-compile/Wikipedia-Content-Scraper | b78f2db1b9a8710fe0014f96c67b8b9231ce4ca7 | [
"MIT"
] | null | null | null | __author__ = "Adityan"
| 11.5 | 22 | 0.73913 |
acdeb7250f15e0453083622fb74f00b8668d646d | 2,118 | py | Python | utils/jifen.py | haygcao/UnicomDailyTask | 08bb6c7de320a9355dfff066ebff9bf72d7619b4 | [
"MIT"
] | 148 | 2021-09-13T03:20:28.000Z | 2022-03-30T02:45:44.000Z | utils/jifen.py | haygcao/UnicomDailyTask | 08bb6c7de320a9355dfff066ebff9bf72d7619b4 | [
"MIT"
] | 91 | 2021-09-13T03:20:05.000Z | 2022-03-31T16:57:17.000Z | utils/jifen.py | haygcao/UnicomDailyTask | 08bb6c7de320a9355dfff066ebff9bf72d7619b4 | [
"MIT"
] | 88 | 2021-09-14T09:33:42.000Z | 2022-03-30T14:31:37.000Z | # -*- coding: utf8 -*-
import math
import json
import base64
from random import random
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad
from Crypto.Cipher import ARC4
def secretkeyArray():
    """Return five random 16-character keys drawn from [0-9A-Za-z].

    Consumes random() exactly 5 * 16 times, one call per character, with
    the same digit/upper/lower alphabet ordering as before.
    """
    alphabet = (
        "0123456789"
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "abcdefghijklmnopqrstuvwxyz"
    )
    return [
        "".join(alphabet[math.floor(random() * 62)] for _ in range(16))
        for _ in range(5)
    ]
def encrypt_free_login_params(params):
    """AES-ECB-encrypt *params* (as compact JSON) under a randomly chosen key.

    One of five freshly generated candidate keys is picked at random; the
    chosen index is appended to the base64 ciphertext so the receiver can
    select the same key from the returned "parKey" list.
    """
    candidate_keys = secretkeyArray()
    chosen = math.floor(random() * 5)
    key = candidate_keys[chosen]
    plaintext = json.dumps(params, separators=(',', ':',), ensure_ascii=False).encode('utf8')
    cipher = AES.new(key=key.encode('utf8'), mode=AES.MODE_ECB)
    ciphertext = cipher.encrypt(pad(plaintext, 16))
    return {
        "params": base64.b64encode(ciphertext).decode('utf8') + str(chosen),
        "parKey": candidate_keys,
    }
def decrypt_free_login_params(ciphertext):
    """Invert encrypt_free_login_params: recover and JSON-decode the payload.

    The final character of ciphertext["params"] is the index of the key to
    use from ciphertext["parKey"]; the rest is the base64 AES-ECB blob.
    """
    encoded = ciphertext['params']
    key = ciphertext['parKey'][int(encoded[-1])]
    raw = base64.b64decode(encoded[:-1])
    cipher = AES.new(key=key.encode('utf8'), mode=AES.MODE_ECB)
    plaintext = unpad(cipher.decrypt(raw), 16)
    return json.loads(plaintext)
def encrypt_req_params(params, jfid):
    """RC4-encrypt *params* (compact JSON) with a 16-char key sliced from *jfid*.

    Returns the ciphertext base64-encoded as a str.
    """
    payload = json.dumps(params, separators=(',', ':',), ensure_ascii=False).encode('utf8')
    rc4 = ARC4.new(key=jfid[3:19].encode('utf8'))
    return base64.b64encode(rc4.encrypt(payload)).decode('utf8')
def decrypt_req_parmas(ciphertext, jfid):
    """Invert encrypt_req_params and JSON-decode the result.

    (The misspelled public name is preserved for compatibility with callers.)
    """
    rc4 = ARC4.new(key=jfid[3:19].encode('utf8'))
    return json.loads(rc4.decrypt(base64.b64decode(ciphertext)))
# Import-only module: executing it as a script is intentionally a no-op.
if __name__ == '__main__':
    pass
| 29.830986 | 112 | 0.564212 |
acdeb740b059200558f5548f0615760a3817b300 | 25 | py | Python | mysite/__init__.py | kpi-web-guild/django-girls-blog-slsh1o | bb2f20a84ef431ed7adb94cbb231b5048c4346c7 | [
"MIT"
] | null | null | null | mysite/__init__.py | kpi-web-guild/django-girls-blog-slsh1o | bb2f20a84ef431ed7adb94cbb231b5048c4346c7 | [
"MIT"
] | 28 | 2020-01-29T00:28:30.000Z | 2020-12-06T20:59:36.000Z | mysite/__init__.py | kpi-web-guild/django-girls-blog-slsh1o | bb2f20a84ef431ed7adb94cbb231b5048c4346c7 | [
"MIT"
] | null | null | null | """Django mysite app."""
| 12.5 | 24 | 0.6 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.